aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd_iommu.c196
-rw-r--r--drivers/iommu/amd_iommu_types.h1
-rw-r--r--drivers/iommu/intel-iommu.c31
-rw-r--r--drivers/iommu/omap-iommu-debug.c8
-rw-r--r--drivers/iommu/omap-iommu.c107
-rw-r--r--drivers/iommu/omap-iommu.h225
-rw-r--r--drivers/iommu/omap-iommu2.c334
-rw-r--r--drivers/iommu/omap-iopgtable.h98
-rw-r--r--drivers/iommu/omap-iovmm.c50
-rw-r--r--drivers/iommu/tegra-gart.c2
-rw-r--r--drivers/iommu/tegra-smmu.c6
-rw-r--r--drivers/media/platform/omap3isp/isp.c1
-rw-r--r--drivers/media/platform/omap3isp/isp.h4
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c1
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c1
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c3
17 files changed, 966 insertions, 103 deletions
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 14a4d5fc94fa..f66b816d455c 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_DMAR_TABLE) += dmar.o
7obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o 7obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
8obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o 8obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
9obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o 9obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
10obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
10obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o 11obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
11obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o 12obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
12obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o 13obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 55074cba20eb..c1c74e030a58 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -57,17 +57,9 @@
57 * physically contiguous memory regions it is mapping into page sizes 57 * physically contiguous memory regions it is mapping into page sizes
58 * that we support. 58 * that we support.
59 * 59 *
60 * Traditionally the IOMMU core just handed us the mappings directly, 60 * 512GB Pages are not supported due to a hardware bug
61 * after making sure the size is an order of a 4KiB page and that the
62 * mapping has natural alignment.
63 *
64 * To retain this behavior, we currently advertise that we support
65 * all page sizes that are an order of 4KiB.
66 *
67 * If at some point we'd like to utilize the IOMMU core's new behavior,
68 * we could change this to advertise the real page sizes we support.
69 */ 61 */
70#define AMD_IOMMU_PGSIZES (~0xFFFUL) 62#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
71 63
72static DEFINE_RWLOCK(amd_iommu_devtable_lock); 64static DEFINE_RWLOCK(amd_iommu_devtable_lock);
73 65
@@ -140,6 +132,9 @@ static void free_dev_data(struct iommu_dev_data *dev_data)
140 list_del(&dev_data->dev_data_list); 132 list_del(&dev_data->dev_data_list);
141 spin_unlock_irqrestore(&dev_data_list_lock, flags); 133 spin_unlock_irqrestore(&dev_data_list_lock, flags);
142 134
135 if (dev_data->group)
136 iommu_group_put(dev_data->group);
137
143 kfree(dev_data); 138 kfree(dev_data);
144} 139}
145 140
@@ -274,41 +269,23 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
274 *from = to; 269 *from = to;
275} 270}
276 271
277#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) 272static struct pci_bus *find_hosted_bus(struct pci_bus *bus)
278
279static int iommu_init_device(struct device *dev)
280{ 273{
281 struct pci_dev *dma_pdev = NULL, *pdev = to_pci_dev(dev); 274 while (!bus->self) {
282 struct iommu_dev_data *dev_data; 275 if (!pci_is_root_bus(bus))
283 struct iommu_group *group; 276 bus = bus->parent;
284 u16 alias; 277 else
285 int ret; 278 return ERR_PTR(-ENODEV);
286 279 }
287 if (dev->archdata.iommu)
288 return 0;
289
290 dev_data = find_dev_data(get_device_id(dev));
291 if (!dev_data)
292 return -ENOMEM;
293
294 alias = amd_iommu_alias_table[dev_data->devid];
295 if (alias != dev_data->devid) {
296 struct iommu_dev_data *alias_data;
297 280
298 alias_data = find_dev_data(alias); 281 return bus;
299 if (alias_data == NULL) { 282}
300 pr_err("AMD-Vi: Warning: Unhandled device %s\n",
301 dev_name(dev));
302 free_dev_data(dev_data);
303 return -ENOTSUPP;
304 }
305 dev_data->alias_data = alias_data;
306 283
307 dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff); 284#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
308 }
309 285
310 if (dma_pdev == NULL) 286static struct pci_dev *get_isolation_root(struct pci_dev *pdev)
311 dma_pdev = pci_dev_get(pdev); 287{
288 struct pci_dev *dma_pdev = pdev;
312 289
313 /* Account for quirked devices */ 290 /* Account for quirked devices */
314 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); 291 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
@@ -330,14 +307,9 @@ static int iommu_init_device(struct device *dev)
330 * Finding the next device may require skipping virtual buses. 307 * Finding the next device may require skipping virtual buses.
331 */ 308 */
332 while (!pci_is_root_bus(dma_pdev->bus)) { 309 while (!pci_is_root_bus(dma_pdev->bus)) {
333 struct pci_bus *bus = dma_pdev->bus; 310 struct pci_bus *bus = find_hosted_bus(dma_pdev->bus);
334 311 if (IS_ERR(bus))
335 while (!bus->self) { 312 break;
336 if (!pci_is_root_bus(bus))
337 bus = bus->parent;
338 else
339 goto root_bus;
340 }
341 313
342 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) 314 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
343 break; 315 break;
@@ -345,19 +317,137 @@ static int iommu_init_device(struct device *dev)
345 swap_pci_ref(&dma_pdev, pci_dev_get(bus->self)); 317 swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
346 } 318 }
347 319
348root_bus: 320 return dma_pdev;
349 group = iommu_group_get(&dma_pdev->dev); 321}
350 pci_dev_put(dma_pdev); 322
323static int use_pdev_iommu_group(struct pci_dev *pdev, struct device *dev)
324{
325 struct iommu_group *group = iommu_group_get(&pdev->dev);
326 int ret;
327
351 if (!group) { 328 if (!group) {
352 group = iommu_group_alloc(); 329 group = iommu_group_alloc();
353 if (IS_ERR(group)) 330 if (IS_ERR(group))
354 return PTR_ERR(group); 331 return PTR_ERR(group);
332
333 WARN_ON(&pdev->dev != dev);
355 } 334 }
356 335
357 ret = iommu_group_add_device(group, dev); 336 ret = iommu_group_add_device(group, dev);
358
359 iommu_group_put(group); 337 iommu_group_put(group);
338 return ret;
339}
340
341static int use_dev_data_iommu_group(struct iommu_dev_data *dev_data,
342 struct device *dev)
343{
344 if (!dev_data->group) {
345 struct iommu_group *group = iommu_group_alloc();
346 if (IS_ERR(group))
347 return PTR_ERR(group);
348
349 dev_data->group = group;
350 }
351
352 return iommu_group_add_device(dev_data->group, dev);
353}
354
355static int init_iommu_group(struct device *dev)
356{
357 struct iommu_dev_data *dev_data;
358 struct iommu_group *group;
359 struct pci_dev *dma_pdev;
360 int ret;
361
362 group = iommu_group_get(dev);
363 if (group) {
364 iommu_group_put(group);
365 return 0;
366 }
367
368 dev_data = find_dev_data(get_device_id(dev));
369 if (!dev_data)
370 return -ENOMEM;
371
372 if (dev_data->alias_data) {
373 u16 alias;
374 struct pci_bus *bus;
375
376 if (dev_data->alias_data->group)
377 goto use_group;
378
379 /*
380 * If the alias device exists, it's effectively just a first
381 * level quirk for finding the DMA source.
382 */
383 alias = amd_iommu_alias_table[dev_data->devid];
384 dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
385 if (dma_pdev) {
386 dma_pdev = get_isolation_root(dma_pdev);
387 goto use_pdev;
388 }
389
390 /*
391 * If the alias is virtual, try to find a parent device
392 * and test whether the IOMMU group is actualy rooted above
393 * the alias. Be careful to also test the parent device if
394 * we think the alias is the root of the group.
395 */
396 bus = pci_find_bus(0, alias >> 8);
397 if (!bus)
398 goto use_group;
399
400 bus = find_hosted_bus(bus);
401 if (IS_ERR(bus) || !bus->self)
402 goto use_group;
403
404 dma_pdev = get_isolation_root(pci_dev_get(bus->self));
405 if (dma_pdev != bus->self || (dma_pdev->multifunction &&
406 !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)))
407 goto use_pdev;
408
409 pci_dev_put(dma_pdev);
410 goto use_group;
411 }
412
413 dma_pdev = get_isolation_root(pci_dev_get(to_pci_dev(dev)));
414use_pdev:
415 ret = use_pdev_iommu_group(dma_pdev, dev);
416 pci_dev_put(dma_pdev);
417 return ret;
418use_group:
419 return use_dev_data_iommu_group(dev_data->alias_data, dev);
420}
421
422static int iommu_init_device(struct device *dev)
423{
424 struct pci_dev *pdev = to_pci_dev(dev);
425 struct iommu_dev_data *dev_data;
426 u16 alias;
427 int ret;
428
429 if (dev->archdata.iommu)
430 return 0;
431
432 dev_data = find_dev_data(get_device_id(dev));
433 if (!dev_data)
434 return -ENOMEM;
435
436 alias = amd_iommu_alias_table[dev_data->devid];
437 if (alias != dev_data->devid) {
438 struct iommu_dev_data *alias_data;
439
440 alias_data = find_dev_data(alias);
441 if (alias_data == NULL) {
442 pr_err("AMD-Vi: Warning: Unhandled device %s\n",
443 dev_name(dev));
444 free_dev_data(dev_data);
445 return -ENOTSUPP;
446 }
447 dev_data->alias_data = alias_data;
448 }
360 449
450 ret = init_iommu_group(dev);
361 if (ret) 451 if (ret)
362 return ret; 452 return ret;
363 453
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index c9aa3d079ff0..e38ab438bb34 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -426,6 +426,7 @@ struct iommu_dev_data {
426 struct iommu_dev_data *alias_data;/* The alias dev_data */ 426 struct iommu_dev_data *alias_data;/* The alias dev_data */
427 struct protection_domain *domain; /* Domain the device is bound to */ 427 struct protection_domain *domain; /* Domain the device is bound to */
428 atomic_t bind; /* Domain attach reference count */ 428 atomic_t bind; /* Domain attach reference count */
429 struct iommu_group *group; /* IOMMU group for virtual aliases */
429 u16 devid; /* PCI Device ID */ 430 u16 devid; /* PCI Device ID */
430 bool iommu_v2; /* Device can make use of IOMMUv2 */ 431 bool iommu_v2; /* Device can make use of IOMMUv2 */
431 bool passthrough; /* Default for device is pt_domain */ 432 bool passthrough; /* Default for device is pt_domain */
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0badfa48b32b..f7fd3d0aeb4c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2320,8 +2320,39 @@ static int domain_add_dev_info(struct dmar_domain *domain,
2320 return 0; 2320 return 0;
2321} 2321}
2322 2322
2323static bool device_has_rmrr(struct pci_dev *dev)
2324{
2325 struct dmar_rmrr_unit *rmrr;
2326 int i;
2327
2328 for_each_rmrr_units(rmrr) {
2329 for (i = 0; i < rmrr->devices_cnt; i++) {
2330 /*
2331 * Return TRUE if this RMRR contains the device that
2332 * is passed in.
2333 */
2334 if (rmrr->devices[i] == dev)
2335 return true;
2336 }
2337 }
2338 return false;
2339}
2340
2323static int iommu_should_identity_map(struct pci_dev *pdev, int startup) 2341static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2324{ 2342{
2343
2344 /*
2345 * We want to prevent any device associated with an RMRR from
2346 * getting placed into the SI Domain. This is done because
2347 * problems exist when devices are moved in and out of domains
2348 * and their respective RMRR info is lost. We exempt USB devices
2349 * from this process due to their usage of RMRRs that are known
2350 * to not be needed after BIOS hand-off to OS.
2351 */
2352 if (device_has_rmrr(pdev) &&
2353 (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
2354 return 0;
2355
2325 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) 2356 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2326 return 1; 2357 return 1;
2327 2358
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index f55fc5dfbadc..d97fbe4fb9b1 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -18,11 +18,11 @@
18#include <linux/uaccess.h> 18#include <linux/uaccess.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/debugfs.h> 20#include <linux/debugfs.h>
21#include <linux/omap-iommu.h>
22#include <linux/platform_data/iommu-omap.h>
21 23
22#include <plat/iommu.h> 24#include "omap-iopgtable.h"
23#include <plat/iovmm.h> 25#include "omap-iommu.h"
24
25#include <plat/iopgtable.h>
26 26
27#define MAXCOLUMN 100 /* for short messages */ 27#define MAXCOLUMN 100 /* for short messages */
28 28
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index d0b1234581be..18108c1405e2 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -16,17 +16,20 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/ioport.h> 18#include <linux/ioport.h>
19#include <linux/clk.h>
20#include <linux/platform_device.h> 19#include <linux/platform_device.h>
21#include <linux/iommu.h> 20#include <linux/iommu.h>
21#include <linux/omap-iommu.h>
22#include <linux/mutex.h> 22#include <linux/mutex.h>
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/io.h>
25#include <linux/pm_runtime.h>
24 26
25#include <asm/cacheflush.h> 27#include <asm/cacheflush.h>
26 28
27#include <plat/iommu.h> 29#include <linux/platform_data/iommu-omap.h>
28 30
29#include <plat/iopgtable.h> 31#include "omap-iopgtable.h"
32#include "omap-iommu.h"
30 33
31#define for_each_iotlb_cr(obj, n, __i, cr) \ 34#define for_each_iotlb_cr(obj, n, __i, cr) \
32 for (__i = 0; \ 35 for (__i = 0; \
@@ -51,6 +54,21 @@ struct omap_iommu_domain {
51 spinlock_t lock; 54 spinlock_t lock;
52}; 55};
53 56
57#define MMU_LOCK_BASE_SHIFT 10
58#define MMU_LOCK_BASE_MASK (0x1f << MMU_LOCK_BASE_SHIFT)
59#define MMU_LOCK_BASE(x) \
60 ((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)
61
62#define MMU_LOCK_VICT_SHIFT 4
63#define MMU_LOCK_VICT_MASK (0x1f << MMU_LOCK_VICT_SHIFT)
64#define MMU_LOCK_VICT(x) \
65 ((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
66
67struct iotlb_lock {
68 short base;
69 short vict;
70};
71
54/* accommodate the difference between omap1 and omap2/3 */ 72/* accommodate the difference between omap1 and omap2/3 */
55static const struct iommu_functions *arch_iommu; 73static const struct iommu_functions *arch_iommu;
56 74
@@ -125,31 +143,44 @@ EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
125static int iommu_enable(struct omap_iommu *obj) 143static int iommu_enable(struct omap_iommu *obj)
126{ 144{
127 int err; 145 int err;
146 struct platform_device *pdev = to_platform_device(obj->dev);
147 struct iommu_platform_data *pdata = pdev->dev.platform_data;
128 148
129 if (!obj) 149 if (!obj || !pdata)
130 return -EINVAL; 150 return -EINVAL;
131 151
132 if (!arch_iommu) 152 if (!arch_iommu)
133 return -ENODEV; 153 return -ENODEV;
134 154
135 clk_enable(obj->clk); 155 if (pdata->deassert_reset) {
156 err = pdata->deassert_reset(pdev, pdata->reset_name);
157 if (err) {
158 dev_err(obj->dev, "deassert_reset failed: %d\n", err);
159 return err;
160 }
161 }
162
163 pm_runtime_get_sync(obj->dev);
136 164
137 err = arch_iommu->enable(obj); 165 err = arch_iommu->enable(obj);
138 166
139 clk_disable(obj->clk);
140 return err; 167 return err;
141} 168}
142 169
143static void iommu_disable(struct omap_iommu *obj) 170static void iommu_disable(struct omap_iommu *obj)
144{ 171{
145 if (!obj) 172 struct platform_device *pdev = to_platform_device(obj->dev);
146 return; 173 struct iommu_platform_data *pdata = pdev->dev.platform_data;
147 174
148 clk_enable(obj->clk); 175 if (!obj || !pdata)
176 return;
149 177
150 arch_iommu->disable(obj); 178 arch_iommu->disable(obj);
151 179
152 clk_disable(obj->clk); 180 pm_runtime_put_sync(obj->dev);
181
182 if (pdata->assert_reset)
183 pdata->assert_reset(pdev, pdata->reset_name);
153} 184}
154 185
155/* 186/*
@@ -272,7 +303,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
272 if (!obj || !obj->nr_tlb_entries || !e) 303 if (!obj || !obj->nr_tlb_entries || !e)
273 return -EINVAL; 304 return -EINVAL;
274 305
275 clk_enable(obj->clk); 306 pm_runtime_get_sync(obj->dev);
276 307
277 iotlb_lock_get(obj, &l); 308 iotlb_lock_get(obj, &l);
278 if (l.base == obj->nr_tlb_entries) { 309 if (l.base == obj->nr_tlb_entries) {
@@ -302,7 +333,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
302 333
303 cr = iotlb_alloc_cr(obj, e); 334 cr = iotlb_alloc_cr(obj, e);
304 if (IS_ERR(cr)) { 335 if (IS_ERR(cr)) {
305 clk_disable(obj->clk); 336 pm_runtime_put_sync(obj->dev);
306 return PTR_ERR(cr); 337 return PTR_ERR(cr);
307 } 338 }
308 339
@@ -316,7 +347,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
316 l.vict = l.base; 347 l.vict = l.base;
317 iotlb_lock_set(obj, &l); 348 iotlb_lock_set(obj, &l);
318out: 349out:
319 clk_disable(obj->clk); 350 pm_runtime_put_sync(obj->dev);
320 return err; 351 return err;
321} 352}
322 353
@@ -346,7 +377,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
346 int i; 377 int i;
347 struct cr_regs cr; 378 struct cr_regs cr;
348 379
349 clk_enable(obj->clk); 380 pm_runtime_get_sync(obj->dev);
350 381
351 for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) { 382 for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
352 u32 start; 383 u32 start;
@@ -365,7 +396,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
365 iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); 396 iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
366 } 397 }
367 } 398 }
368 clk_disable(obj->clk); 399 pm_runtime_put_sync(obj->dev);
369 400
370 if (i == obj->nr_tlb_entries) 401 if (i == obj->nr_tlb_entries)
371 dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da); 402 dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
@@ -379,7 +410,7 @@ static void flush_iotlb_all(struct omap_iommu *obj)
379{ 410{
380 struct iotlb_lock l; 411 struct iotlb_lock l;
381 412
382 clk_enable(obj->clk); 413 pm_runtime_get_sync(obj->dev);
383 414
384 l.base = 0; 415 l.base = 0;
385 l.vict = 0; 416 l.vict = 0;
@@ -387,7 +418,7 @@ static void flush_iotlb_all(struct omap_iommu *obj)
387 418
388 iommu_write_reg(obj, 1, MMU_GFLUSH); 419 iommu_write_reg(obj, 1, MMU_GFLUSH);
389 420
390 clk_disable(obj->clk); 421 pm_runtime_put_sync(obj->dev);
391} 422}
392 423
393#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE) 424#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
@@ -397,11 +428,11 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
397 if (!obj || !buf) 428 if (!obj || !buf)
398 return -EINVAL; 429 return -EINVAL;
399 430
400 clk_enable(obj->clk); 431 pm_runtime_get_sync(obj->dev);
401 432
402 bytes = arch_iommu->dump_ctx(obj, buf, bytes); 433 bytes = arch_iommu->dump_ctx(obj, buf, bytes);
403 434
404 clk_disable(obj->clk); 435 pm_runtime_put_sync(obj->dev);
405 436
406 return bytes; 437 return bytes;
407} 438}
@@ -415,7 +446,7 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
415 struct cr_regs tmp; 446 struct cr_regs tmp;
416 struct cr_regs *p = crs; 447 struct cr_regs *p = crs;
417 448
418 clk_enable(obj->clk); 449 pm_runtime_get_sync(obj->dev);
419 iotlb_lock_get(obj, &saved); 450 iotlb_lock_get(obj, &saved);
420 451
421 for_each_iotlb_cr(obj, num, i, tmp) { 452 for_each_iotlb_cr(obj, num, i, tmp) {
@@ -425,7 +456,7 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
425 } 456 }
426 457
427 iotlb_lock_set(obj, &saved); 458 iotlb_lock_set(obj, &saved);
428 clk_disable(obj->clk); 459 pm_runtime_put_sync(obj->dev);
429 460
430 return p - crs; 461 return p - crs;
431} 462}
@@ -789,9 +820,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
789 if (!obj->refcount) 820 if (!obj->refcount)
790 return IRQ_NONE; 821 return IRQ_NONE;
791 822
792 clk_enable(obj->clk);
793 errs = iommu_report_fault(obj, &da); 823 errs = iommu_report_fault(obj, &da);
794 clk_disable(obj->clk);
795 if (errs == 0) 824 if (errs == 0)
796 return IRQ_HANDLED; 825 return IRQ_HANDLED;
797 826
@@ -913,17 +942,10 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
913 struct resource *res; 942 struct resource *res;
914 struct iommu_platform_data *pdata = pdev->dev.platform_data; 943 struct iommu_platform_data *pdata = pdev->dev.platform_data;
915 944
916 if (pdev->num_resources != 2)
917 return -EINVAL;
918
919 obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); 945 obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
920 if (!obj) 946 if (!obj)
921 return -ENOMEM; 947 return -ENOMEM;
922 948
923 obj->clk = clk_get(&pdev->dev, pdata->clk_name);
924 if (IS_ERR(obj->clk))
925 goto err_clk;
926
927 obj->nr_tlb_entries = pdata->nr_tlb_entries; 949 obj->nr_tlb_entries = pdata->nr_tlb_entries;
928 obj->name = pdata->name; 950 obj->name = pdata->name;
929 obj->dev = &pdev->dev; 951 obj->dev = &pdev->dev;
@@ -966,6 +988,9 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
966 goto err_irq; 988 goto err_irq;
967 platform_set_drvdata(pdev, obj); 989 platform_set_drvdata(pdev, obj);
968 990
991 pm_runtime_irq_safe(obj->dev);
992 pm_runtime_enable(obj->dev);
993
969 dev_info(&pdev->dev, "%s registered\n", obj->name); 994 dev_info(&pdev->dev, "%s registered\n", obj->name);
970 return 0; 995 return 0;
971 996
@@ -974,8 +999,6 @@ err_irq:
974err_ioremap: 999err_ioremap:
975 release_mem_region(res->start, resource_size(res)); 1000 release_mem_region(res->start, resource_size(res));
976err_mem: 1001err_mem:
977 clk_put(obj->clk);
978err_clk:
979 kfree(obj); 1002 kfree(obj);
980 return err; 1003 return err;
981} 1004}
@@ -996,7 +1019,8 @@ static int __devexit omap_iommu_remove(struct platform_device *pdev)
996 release_mem_region(res->start, resource_size(res)); 1019 release_mem_region(res->start, resource_size(res));
997 iounmap(obj->regbase); 1020 iounmap(obj->regbase);
998 1021
999 clk_put(obj->clk); 1022 pm_runtime_disable(obj->dev);
1023
1000 dev_info(&pdev->dev, "%s removed\n", obj->name); 1024 dev_info(&pdev->dev, "%s removed\n", obj->name);
1001 kfree(obj); 1025 kfree(obj);
1002 return 0; 1026 return 0;
@@ -1015,6 +1039,23 @@ static void iopte_cachep_ctor(void *iopte)
1015 clean_dcache_area(iopte, IOPTE_TABLE_SIZE); 1039 clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
1016} 1040}
1017 1041
1042static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa,
1043 u32 flags)
1044{
1045 memset(e, 0, sizeof(*e));
1046
1047 e->da = da;
1048 e->pa = pa;
1049 e->valid = 1;
1050 /* FIXME: add OMAP1 support */
1051 e->pgsz = flags & MMU_CAM_PGSZ_MASK;
1052 e->endian = flags & MMU_RAM_ENDIAN_MASK;
1053 e->elsz = flags & MMU_RAM_ELSZ_MASK;
1054 e->mixed = flags & MMU_RAM_MIXED_MASK;
1055
1056 return iopgsz_to_bytes(e->pgsz);
1057}
1058
1018static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, 1059static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
1019 phys_addr_t pa, size_t bytes, int prot) 1060 phys_addr_t pa, size_t bytes, int prot)
1020{ 1061{
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
new file mode 100644
index 000000000000..120084206602
--- /dev/null
+++ b/drivers/iommu/omap-iommu.h
@@ -0,0 +1,225 @@
1/*
2 * omap iommu: main structures
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation
5 *
6 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#if defined(CONFIG_ARCH_OMAP1)
14#error "iommu for this processor not implemented yet"
15#endif
16
17struct iotlb_entry {
18 u32 da;
19 u32 pa;
20 u32 pgsz, prsvd, valid;
21 union {
22 u16 ap;
23 struct {
24 u32 endian, elsz, mixed;
25 };
26 };
27};
28
29struct omap_iommu {
30 const char *name;
31 struct module *owner;
32 void __iomem *regbase;
33 struct device *dev;
34 void *isr_priv;
35 struct iommu_domain *domain;
36
37 unsigned int refcount;
38 spinlock_t iommu_lock; /* global for this whole object */
39
40 /*
41 * We don't change iopgd for a situation like pgd for a task,
42 * but share it globally for each iommu.
43 */
44 u32 *iopgd;
45 spinlock_t page_table_lock; /* protect iopgd */
46
47 int nr_tlb_entries;
48
49 struct list_head mmap;
50 struct mutex mmap_lock; /* protect mmap */
51
52 void *ctx; /* iommu context: registres saved area */
53 u32 da_start;
54 u32 da_end;
55};
56
57struct cr_regs {
58 union {
59 struct {
60 u16 cam_l;
61 u16 cam_h;
62 };
63 u32 cam;
64 };
65 union {
66 struct {
67 u16 ram_l;
68 u16 ram_h;
69 };
70 u32 ram;
71 };
72};
73
74/* architecture specific functions */
75struct iommu_functions {
76 unsigned long version;
77
78 int (*enable)(struct omap_iommu *obj);
79 void (*disable)(struct omap_iommu *obj);
80 void (*set_twl)(struct omap_iommu *obj, bool on);
81 u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra);
82
83 void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr);
84 void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr);
85
86 struct cr_regs *(*alloc_cr)(struct omap_iommu *obj,
87 struct iotlb_entry *e);
88 int (*cr_valid)(struct cr_regs *cr);
89 u32 (*cr_to_virt)(struct cr_regs *cr);
90 void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e);
91 ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr,
92 char *buf);
93
94 u32 (*get_pte_attr)(struct iotlb_entry *e);
95
96 void (*save_ctx)(struct omap_iommu *obj);
97 void (*restore_ctx)(struct omap_iommu *obj);
98 ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len);
99};
100
101#ifdef CONFIG_IOMMU_API
102/**
103 * dev_to_omap_iommu() - retrieves an omap iommu object from a user device
104 * @dev: iommu client device
105 */
106static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
107{
108 struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
109
110 return arch_data->iommu_dev;
111}
112#endif
113
114/*
115 * MMU Register offsets
116 */
117#define MMU_REVISION 0x00
118#define MMU_IRQSTATUS 0x18
119#define MMU_IRQENABLE 0x1c
120#define MMU_WALKING_ST 0x40
121#define MMU_CNTL 0x44
122#define MMU_FAULT_AD 0x48
123#define MMU_TTB 0x4c
124#define MMU_LOCK 0x50
125#define MMU_LD_TLB 0x54
126#define MMU_CAM 0x58
127#define MMU_RAM 0x5c
128#define MMU_GFLUSH 0x60
129#define MMU_FLUSH_ENTRY 0x64
130#define MMU_READ_CAM 0x68
131#define MMU_READ_RAM 0x6c
132#define MMU_EMU_FAULT_AD 0x70
133
134#define MMU_REG_SIZE 256
135
136/*
137 * MMU Register bit definitions
138 */
139#define MMU_CAM_VATAG_SHIFT 12
140#define MMU_CAM_VATAG_MASK \
141 ((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT)
142#define MMU_CAM_P (1 << 3)
143#define MMU_CAM_V (1 << 2)
144#define MMU_CAM_PGSZ_MASK 3
145#define MMU_CAM_PGSZ_1M (0 << 0)
146#define MMU_CAM_PGSZ_64K (1 << 0)
147#define MMU_CAM_PGSZ_4K (2 << 0)
148#define MMU_CAM_PGSZ_16M (3 << 0)
149
150#define MMU_RAM_PADDR_SHIFT 12
151#define MMU_RAM_PADDR_MASK \
152 ((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)
153
154#define MMU_RAM_ENDIAN_MASK (1 << MMU_RAM_ENDIAN_SHIFT)
155#define MMU_RAM_ENDIAN_BIG (1 << MMU_RAM_ENDIAN_SHIFT)
156
157#define MMU_RAM_ELSZ_MASK (3 << MMU_RAM_ELSZ_SHIFT)
158#define MMU_RAM_ELSZ_8 (0 << MMU_RAM_ELSZ_SHIFT)
159#define MMU_RAM_ELSZ_16 (1 << MMU_RAM_ELSZ_SHIFT)
160#define MMU_RAM_ELSZ_32 (2 << MMU_RAM_ELSZ_SHIFT)
161#define MMU_RAM_ELSZ_NONE (3 << MMU_RAM_ELSZ_SHIFT)
162#define MMU_RAM_MIXED_SHIFT 6
163#define MMU_RAM_MIXED_MASK (1 << MMU_RAM_MIXED_SHIFT)
164#define MMU_RAM_MIXED MMU_RAM_MIXED_MASK
165
166/*
167 * utilities for super page(16MB, 1MB, 64KB and 4KB)
168 */
169
170#define iopgsz_max(bytes) \
171 (((bytes) >= SZ_16M) ? SZ_16M : \
172 ((bytes) >= SZ_1M) ? SZ_1M : \
173 ((bytes) >= SZ_64K) ? SZ_64K : \
174 ((bytes) >= SZ_4K) ? SZ_4K : 0)
175
176#define bytes_to_iopgsz(bytes) \
177 (((bytes) == SZ_16M) ? MMU_CAM_PGSZ_16M : \
178 ((bytes) == SZ_1M) ? MMU_CAM_PGSZ_1M : \
179 ((bytes) == SZ_64K) ? MMU_CAM_PGSZ_64K : \
180 ((bytes) == SZ_4K) ? MMU_CAM_PGSZ_4K : -1)
181
182#define iopgsz_to_bytes(iopgsz) \
183 (((iopgsz) == MMU_CAM_PGSZ_16M) ? SZ_16M : \
184 ((iopgsz) == MMU_CAM_PGSZ_1M) ? SZ_1M : \
185 ((iopgsz) == MMU_CAM_PGSZ_64K) ? SZ_64K : \
186 ((iopgsz) == MMU_CAM_PGSZ_4K) ? SZ_4K : 0)
187
188#define iopgsz_ok(bytes) (bytes_to_iopgsz(bytes) >= 0)
189
190/*
191 * global functions
192 */
193extern u32 omap_iommu_arch_version(void);
194
195extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e);
196
197extern int
198omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e);
199
200extern void omap_iommu_save_ctx(struct device *dev);
201extern void omap_iommu_restore_ctx(struct device *dev);
202
203extern int omap_foreach_iommu_device(void *data,
204 int (*fn)(struct device *, void *));
205
206extern int omap_install_iommu_arch(const struct iommu_functions *ops);
207extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops);
208
209extern ssize_t
210omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
211extern size_t
212omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
213
214/*
215 * register accessors
216 */
217static inline u32 iommu_read_reg(struct omap_iommu *obj, size_t offs)
218{
219 return __raw_readl(obj->regbase + offs);
220}
221
222static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
223{
224 __raw_writel(val, obj->regbase + offs);
225}
diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c
new file mode 100644
index 000000000000..d745094a69dd
--- /dev/null
+++ b/drivers/iommu/omap-iommu2.c
@@ -0,0 +1,334 @@
1/*
2 * omap iommu: omap2/3 architecture specific functions
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation
5 *
6 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
7 * Paul Mundt and Toshihiro Kobayashi
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/err.h>
15#include <linux/device.h>
16#include <linux/io.h>
17#include <linux/jiffies.h>
18#include <linux/module.h>
19#include <linux/omap-iommu.h>
20#include <linux/slab.h>
21#include <linux/stringify.h>
22#include <linux/platform_data/iommu-omap.h>
23
24#include "omap-iommu.h"
25
26/*
27 * omap2 architecture specific register bit definitions
28 */
29#define IOMMU_ARCH_VERSION 0x00000011
30
31/* IRQSTATUS & IRQENABLE */
32#define MMU_IRQ_MULTIHITFAULT (1 << 4)
33#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
34#define MMU_IRQ_EMUMISS (1 << 2)
35#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
36#define MMU_IRQ_TLBMISS (1 << 0)
37
38#define __MMU_IRQ_FAULT \
39 (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
40#define MMU_IRQ_MASK \
41 (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
42#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
43#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)
44
45/* MMU_CNTL */
46#define MMU_CNTL_SHIFT 1
47#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
48#define MMU_CNTL_EML_TLB (1 << 3)
49#define MMU_CNTL_TWL_EN (1 << 2)
50#define MMU_CNTL_MMU_EN (1 << 1)
51
52#define get_cam_va_mask(pgsz) \
53 (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
54 ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \
55 ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \
56 ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
57
58/* IOMMU errors */
59#define OMAP_IOMMU_ERR_TLB_MISS (1 << 0)
60#define OMAP_IOMMU_ERR_TRANS_FAULT (1 << 1)
61#define OMAP_IOMMU_ERR_EMU_MISS (1 << 2)
62#define OMAP_IOMMU_ERR_TBLWALK_FAULT (1 << 3)
63#define OMAP_IOMMU_ERR_MULTIHIT_FAULT (1 << 4)
64
/*
 * __iommu_set_twl - enable the MMU, with or without hardware table walk
 * @obj:	target iommu
 * @on:		true: enable the table walking logic (TWL),
 *		false: run on TLB entries only
 *
 * Also selects the matching fault IRQ mask: with TWL on, table walk
 * faults are reported (MMU_IRQ_TWL_MASK); with TWL off, TLB misses
 * are reported instead (MMU_IRQ_TLB_MISS_MASK).
 */
static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	if (on)
		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
	else
		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);

	/* read-modify-write of MMU_CNTL: clear the control field first */
	l &= ~MMU_CNTL_MASK;
	if (on)
		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
	else
		l |= (MMU_CNTL_MMU_EN);

	iommu_write_reg(obj, l, MMU_CNTL);
}
82
83
84static int omap2_iommu_enable(struct omap_iommu *obj)
85{
86 u32 l, pa;
87
88 if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
89 return -EINVAL;
90
91 pa = virt_to_phys(obj->iopgd);
92 if (!IS_ALIGNED(pa, SZ_16K))
93 return -EINVAL;
94
95 l = iommu_read_reg(obj, MMU_REVISION);
96 dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
97 (l >> 4) & 0xf, l & 0xf);
98
99 iommu_write_reg(obj, pa, MMU_TTB);
100
101 __iommu_set_twl(obj, true);
102
103 return 0;
104}
105
106static void omap2_iommu_disable(struct omap_iommu *obj)
107{
108 u32 l = iommu_read_reg(obj, MMU_CNTL);
109
110 l &= ~MMU_CNTL_MASK;
111 iommu_write_reg(obj, l, MMU_CNTL);
112
113 dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
114}
115
116static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on)
117{
118 __iommu_set_twl(obj, false);
119}
120
/*
 * omap2_iommu_fault_isr - decode and acknowledge a pending MMU fault
 * @obj:	target iommu
 * @ra:		out: faulting device virtual address (0 when nothing pending)
 *
 * Translates the enabled MMU_IRQSTATUS bits into a bitmask of
 * OMAP_IOMMU_ERR_* codes.  Returns 0 when no masked status bit is set.
 * The handled bits are written back to MMU_IRQSTATUS to acknowledge
 * the interrupt (presumably write-1-to-clear — per the OMAP TRM).
 */
static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra)
{
	u32 stat, da;
	u32 errs = 0;

	stat = iommu_read_reg(obj, MMU_IRQSTATUS);
	stat &= MMU_IRQ_MASK;
	if (!stat) {
		*ra = 0;
		return 0;
	}

	/* latch the device address that triggered the fault */
	da = iommu_read_reg(obj, MMU_FAULT_AD);
	*ra = da;

	if (stat & MMU_IRQ_TLBMISS)
		errs |= OMAP_IOMMU_ERR_TLB_MISS;
	if (stat & MMU_IRQ_TRANSLATIONFAULT)
		errs |= OMAP_IOMMU_ERR_TRANS_FAULT;
	if (stat & MMU_IRQ_EMUMISS)
		errs |= OMAP_IOMMU_ERR_EMU_MISS;
	if (stat & MMU_IRQ_TABLEWALKFAULT)
		errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT;
	if (stat & MMU_IRQ_MULTIHITFAULT)
		errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT;
	iommu_write_reg(obj, stat, MMU_IRQSTATUS);

	return errs;
}
150
/* Read the currently selected TLB entry (CAM + RAM words) into @cr. */
static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}
156
/* Load @cr into the TLB, forcing the valid bit on in the CAM word. */
static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);
}
162
163static u32 omap2_cr_to_virt(struct cr_regs *cr)
164{
165 u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
166 u32 mask = get_cam_va_mask(cr->cam & page_size);
167
168 return cr->cam & mask;
169}
170
/*
 * omap2_alloc_cr - encode a software TLB entry into a CAM/RAM pair
 * @obj:	target iommu (used only for error reporting)
 * @e:		entry to encode (da, pa, page size, attributes)
 *
 * Returns a kmalloc'd cr_regs the caller must free, or
 * ERR_PTR(-EINVAL) when e->da is not aligned to its page size, or
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj,
						struct iotlb_entry *e)
{
	struct cr_regs *cr;

	/* da must have no bits below the page-size-dependent VA mask */
	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	/* CAM: VA tag + preserved/page-size/valid; RAM: PA + access attrs */
	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	cr->ram = e->pa | e->endian | e->elsz | e->mixed;

	return cr;
}
191
/* Non-zero iff the entry's valid bit (MMU_CAM_V) is set in the CAM word. */
static inline int omap2_cr_valid(struct cr_regs *cr)
{
	return cr->cam & MMU_CAM_V;
}
196
197static u32 omap2_get_pte_attr(struct iotlb_entry *e)
198{
199 u32 attr;
200
201 attr = e->mixed << 5;
202 attr |= e->endian;
203 attr |= e->elsz >> 3;
204 attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
205 (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
206 return attr;
207}
208
209static ssize_t
210omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf)
211{
212 char *p = buf;
213
214 /* FIXME: Need more detail analysis of cam/ram */
215 p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
216 (cr->cam & MMU_CAM_P) ? 1 : 0);
217
218 return p - buf;
219}
220
/*
 * pr_reg - helper for omap2_iommu_dump_ctx(): append one register line.
 * Relies on the caller's locals 'p' (output cursor), 'len' (remaining
 * space) and 'obj', and jumps to the caller's 'out' label once fewer
 * than 'maxcol' bytes remain.
 */
#define pr_reg(name)							\
	do {								\
		ssize_t bytes;						\
		const char *str = "%20s: %08x\n";			\
		const int maxcol = 32;					\
		bytes = snprintf(p, maxcol, str, __stringify(name),	\
				 iommu_read_reg(obj, MMU_##name));	\
		p += bytes;						\
		len -= bytes;						\
		if (len < maxcol)					\
			goto out;					\
	} while (0)
233
/*
 * omap2_iommu_dump_ctx - dump all MMU registers as text
 * @obj:	target iommu
 * @buf:	output buffer
 * @len:	bytes available in @buf (consumed by pr_reg)
 *
 * Returns the number of bytes written; stops early via pr_reg's jump
 * to 'out' when the buffer runs low.
 */
static ssize_t
omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
{
	char *p = buf;

	pr_reg(REVISION);
	pr_reg(IRQSTATUS);
	pr_reg(IRQENABLE);
	pr_reg(WALKING_ST);
	pr_reg(CNTL);
	pr_reg(FAULT_AD);
	pr_reg(TTB);
	pr_reg(LOCK);
	pr_reg(LD_TLB);
	pr_reg(CAM);
	pr_reg(RAM);
	pr_reg(GFLUSH);
	pr_reg(FLUSH_ENTRY);
	pr_reg(READ_CAM);
	pr_reg(READ_RAM);
	pr_reg(EMU_FAULT_AD);
out:
	return p - buf;
}
258
/*
 * omap2_iommu_save_ctx - snapshot the MMU register file into obj->ctx
 * @obj:	target iommu
 *
 * Copies MMU_REG_SIZE bytes of registers, one 32-bit word at a time,
 * for later restoration by omap2_iommu_restore_ctx().  The first saved
 * word must equal IOMMU_ARCH_VERSION (presumably MMU_REVISION sits at
 * offset 0 — per the register map), otherwise BUG.
 */
static void omap2_iommu_save_ctx(struct omap_iommu *obj)
{
	int i;
	u32 *p = obj->ctx;

	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
		p[i] = iommu_read_reg(obj, i * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
	}

	BUG_ON(p[0] != IOMMU_ARCH_VERSION);
}
271
/*
 * omap2_iommu_restore_ctx - write a previously saved register file back
 * @obj:	target iommu
 *
 * Mirror of omap2_iommu_save_ctx(): writes obj->ctx back word by word.
 * BUGs if the saved context does not start with IOMMU_ARCH_VERSION,
 * i.e. was never captured by save_ctx.
 */
static void omap2_iommu_restore_ctx(struct omap_iommu *obj)
{
	int i;
	u32 *p = obj->ctx;

	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
		iommu_write_reg(obj, p[i], i * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
	}

	BUG_ON(p[0] != IOMMU_ARCH_VERSION);
}
284
285static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
286{
287 e->da = cr->cam & MMU_CAM_VATAG_MASK;
288 e->pa = cr->ram & MMU_RAM_PADDR_MASK;
289 e->valid = cr->cam & MMU_CAM_V;
290 e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK;
291 e->endian = cr->ram & MMU_RAM_ENDIAN_MASK;
292 e->elsz = cr->ram & MMU_RAM_ELSZ_MASK;
293 e->mixed = cr->ram & MMU_RAM_MIXED;
294}
295
/* omap2/3 architecture-specific callbacks, registered at module init. */
static const struct iommu_functions omap2_iommu_ops = {
	.version	= IOMMU_ARCH_VERSION,

	.enable		= omap2_iommu_enable,
	.disable	= omap2_iommu_disable,
	.set_twl	= omap2_iommu_set_twl,
	.fault_isr	= omap2_iommu_fault_isr,

	.tlb_read_cr	= omap2_tlb_read_cr,
	.tlb_load_cr	= omap2_tlb_load_cr,

	.cr_to_e	= omap2_cr_to_e,
	.cr_to_virt	= omap2_cr_to_virt,
	.alloc_cr	= omap2_alloc_cr,
	.cr_valid	= omap2_cr_valid,
	.dump_cr	= omap2_dump_cr,

	.get_pte_attr	= omap2_get_pte_attr,

	.save_ctx	= omap2_iommu_save_ctx,
	.restore_ctx	= omap2_iommu_restore_ctx,
	.dump_ctx	= omap2_iommu_dump_ctx,
};
319
/* Register the omap2/3 callbacks with the generic omap-iommu core. */
static int __init omap2_iommu_init(void)
{
	return omap_install_iommu_arch(&omap2_iommu_ops);
}
module_init(omap2_iommu_init);
325
/* Unregister the omap2/3 callbacks on module unload. */
static void __exit omap2_iommu_exit(void)
{
	omap_uninstall_iommu_arch(&omap2_iommu_ops);
}
module_exit(omap2_iommu_exit);
331
332MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
333MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions");
334MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h
new file mode 100644
index 000000000000..cd4ae9e5b0c6
--- /dev/null
+++ b/drivers/iommu/omap-iopgtable.h
@@ -0,0 +1,98 @@
1/*
2 * omap iommu: pagetable definitions
3 *
4 * Copyright (C) 2008-2010 Nokia Corporation
5 *
6 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13/*
14 * "L2 table" address mask and size definitions.
15 */
16#define IOPGD_SHIFT 20
17#define IOPGD_SIZE (1UL << IOPGD_SHIFT)
18#define IOPGD_MASK (~(IOPGD_SIZE - 1))
19
20/*
21 * "section" address mask and size definitions.
22 */
23#define IOSECTION_SHIFT 20
24#define IOSECTION_SIZE (1UL << IOSECTION_SHIFT)
25#define IOSECTION_MASK (~(IOSECTION_SIZE - 1))
26
27/*
28 * "supersection" address mask and size definitions.
29 */
30#define IOSUPER_SHIFT 24
31#define IOSUPER_SIZE (1UL << IOSUPER_SHIFT)
32#define IOSUPER_MASK (~(IOSUPER_SIZE - 1))
33
34#define PTRS_PER_IOPGD (1UL << (32 - IOPGD_SHIFT))
35#define IOPGD_TABLE_SIZE (PTRS_PER_IOPGD * sizeof(u32))
36
37/*
38 * "small page" address mask and size definitions.
39 */
40#define IOPTE_SHIFT 12
41#define IOPTE_SIZE (1UL << IOPTE_SHIFT)
42#define IOPTE_MASK (~(IOPTE_SIZE - 1))
43
44/*
45 * "large page" address mask and size definitions.
46 */
47#define IOLARGE_SHIFT 16
48#define IOLARGE_SIZE (1UL << IOLARGE_SHIFT)
49#define IOLARGE_MASK (~(IOLARGE_SIZE - 1))
50
51#define PTRS_PER_IOPTE (1UL << (IOPGD_SHIFT - IOPTE_SHIFT))
52#define IOPTE_TABLE_SIZE (PTRS_PER_IOPTE * sizeof(u32))
53
54#define IOPAGE_MASK IOPTE_MASK
55
56/**
57 * omap_iommu_translate() - va to pa translation
58 * @d: omap iommu descriptor
59 * @va: virtual address
60 * @mask: omap iommu descriptor mask
61 *
62 * va to pa translation
63 */
64static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
65{
66 return (d & mask) | (va & (~mask));
67}
68
69/*
70 * some descriptor attributes.
71 */
72#define IOPGD_TABLE (1 << 0)
73#define IOPGD_SECTION (2 << 0)
74#define IOPGD_SUPER (1 << 18 | 2 << 0)
75
76#define iopgd_is_table(x) (((x) & 3) == IOPGD_TABLE)
77#define iopgd_is_section(x) (((x) & (1 << 18 | 3)) == IOPGD_SECTION)
78#define iopgd_is_super(x) (((x) & (1 << 18 | 3)) == IOPGD_SUPER)
79
80#define IOPTE_SMALL (2 << 0)
81#define IOPTE_LARGE (1 << 0)
82
83#define iopte_is_small(x) (((x) & 2) == IOPTE_SMALL)
84#define iopte_is_large(x) (((x) & 3) == IOPTE_LARGE)
85
86/* to find an entry in a page-table-directory */
87#define iopgd_index(da) (((da) >> IOPGD_SHIFT) & (PTRS_PER_IOPGD - 1))
88#define iopgd_offset(obj, da) ((obj)->iopgd + iopgd_index(da))
89
90#define iopgd_page_paddr(iopgd) (*iopgd & ~((1 << 10) - 1))
91#define iopgd_page_vaddr(iopgd) ((u32 *)phys_to_virt(iopgd_page_paddr(iopgd)))
92
93/* to find an entry in the second-level page table. */
94#define iopte_index(da) (((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1))
95#define iopte_offset(iopgd, da) (iopgd_page_vaddr(iopgd) + iopte_index(da))
96
97#define to_iommu(dev) \
98 (struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index 2e10c3e0a7ae..46d875690739 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -17,14 +17,58 @@
17#include <linux/device.h> 17#include <linux/device.h>
18#include <linux/scatterlist.h> 18#include <linux/scatterlist.h>
19#include <linux/iommu.h> 19#include <linux/iommu.h>
20#include <linux/omap-iommu.h>
21#include <linux/platform_data/iommu-omap.h>
20 22
21#include <asm/cacheflush.h> 23#include <asm/cacheflush.h>
22#include <asm/mach/map.h> 24#include <asm/mach/map.h>
23 25
24#include <plat/iommu.h> 26#include "omap-iopgtable.h"
25#include <plat/iovmm.h> 27#include "omap-iommu.h"
26 28
27#include <plat/iopgtable.h> 29/*
30 * IOVMF_FLAGS: attribute for iommu virtual memory area(iovma)
31 *
32 * lower 16 bit is used for h/w and upper 16 bit is for s/w.
33 */
34#define IOVMF_SW_SHIFT 16
35
36/*
37 * iovma: h/w flags derived from cam and ram attribute
38 */
39#define IOVMF_CAM_MASK (~((1 << 10) - 1))
40#define IOVMF_RAM_MASK (~IOVMF_CAM_MASK)
41
42#define IOVMF_PGSZ_MASK (3 << 0)
43#define IOVMF_PGSZ_1M MMU_CAM_PGSZ_1M
44#define IOVMF_PGSZ_64K MMU_CAM_PGSZ_64K
45#define IOVMF_PGSZ_4K MMU_CAM_PGSZ_4K
46#define IOVMF_PGSZ_16M MMU_CAM_PGSZ_16M
47
48#define IOVMF_ENDIAN_MASK (1 << 9)
49#define IOVMF_ENDIAN_BIG MMU_RAM_ENDIAN_BIG
50
51#define IOVMF_ELSZ_MASK (3 << 7)
52#define IOVMF_ELSZ_16 MMU_RAM_ELSZ_16
53#define IOVMF_ELSZ_32 MMU_RAM_ELSZ_32
54#define IOVMF_ELSZ_NONE MMU_RAM_ELSZ_NONE
55
56#define IOVMF_MIXED_MASK (1 << 6)
57#define IOVMF_MIXED MMU_RAM_MIXED
58
59/*
60 * iovma: s/w flags, used for mapping and umapping internally.
61 */
62#define IOVMF_MMIO (1 << IOVMF_SW_SHIFT)
63#define IOVMF_ALLOC (2 << IOVMF_SW_SHIFT)
64#define IOVMF_ALLOC_MASK (3 << IOVMF_SW_SHIFT)
65
66/* "superpages" is supported just with physically linear pages */
67#define IOVMF_DISCONT (1 << (2 + IOVMF_SW_SHIFT))
68#define IOVMF_LINEAR (2 << (2 + IOVMF_SW_SHIFT))
69#define IOVMF_LINEAR_MASK (3 << (2 + IOVMF_SW_SHIFT))
70
71#define IOVMF_DA_FIXED (1 << (4 + IOVMF_SW_SHIFT))
28 72
29static struct kmem_cache *iovm_area_cachep; 73static struct kmem_cache *iovm_area_cachep;
30 74
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index c16e8fc8a4bd..4c9db62814ff 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -398,6 +398,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
398 do_gart_setup(gart, NULL); 398 do_gart_setup(gart, NULL);
399 399
400 gart_handle = gart; 400 gart_handle = gart;
401 bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
401 return 0; 402 return 0;
402 403
403fail: 404fail:
@@ -450,7 +451,6 @@ static struct platform_driver tegra_gart_driver = {
450 451
451static int __devinit tegra_gart_init(void) 452static int __devinit tegra_gart_init(void)
452{ 453{
453 bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
454 return platform_driver_register(&tegra_gart_driver); 454 return platform_driver_register(&tegra_gart_driver);
455} 455}
456 456
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index c0f7a4266263..843123acbb8d 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -696,10 +696,8 @@ static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
696 *pte = _PTE_VACANT(iova); 696 *pte = _PTE_VACANT(iova);
697 FLUSH_CPU_DCACHE(pte, page, sizeof(*pte)); 697 FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
698 flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0); 698 flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0);
699 if (!--(*count)) { 699 if (!--(*count))
700 free_ptbl(as, iova); 700 free_ptbl(as, iova);
701 smmu_flush_regs(as->smmu, 0);
702 }
703} 701}
704 702
705static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova, 703static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova,
@@ -1234,6 +1232,7 @@ static int tegra_smmu_probe(struct platform_device *pdev)
1234 1232
1235 smmu_debugfs_create(smmu); 1233 smmu_debugfs_create(smmu);
1236 smmu_handle = smmu; 1234 smmu_handle = smmu;
1235 bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
1237 return 0; 1236 return 0;
1238} 1237}
1239 1238
@@ -1278,7 +1277,6 @@ static struct platform_driver tegra_smmu_driver = {
1278 1277
1279static int __devinit tegra_smmu_init(void) 1278static int __devinit tegra_smmu_init(void)
1280{ 1279{
1281 bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
1282 return platform_driver_register(&tegra_smmu_driver); 1280 return platform_driver_register(&tegra_smmu_driver);
1283} 1281}
1284 1282
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 99640d8c1db0..7f182f0ff3da 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -61,6 +61,7 @@
61#include <linux/i2c.h> 61#include <linux/i2c.h>
62#include <linux/interrupt.h> 62#include <linux/interrupt.h>
63#include <linux/module.h> 63#include <linux/module.h>
64#include <linux/omap-iommu.h>
64#include <linux/platform_device.h> 65#include <linux/platform_device.h>
65#include <linux/regulator/consumer.h> 66#include <linux/regulator/consumer.h>
66#include <linux/slab.h> 67#include <linux/slab.h>
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index 8be7487c326f..8d6866942b85 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -31,11 +31,9 @@
31#include <media/v4l2-device.h> 31#include <media/v4l2-device.h>
32#include <linux/device.h> 32#include <linux/device.h>
33#include <linux/io.h> 33#include <linux/io.h>
34#include <linux/iommu.h>
34#include <linux/platform_device.h> 35#include <linux/platform_device.h>
35#include <linux/wait.h> 36#include <linux/wait.h>
36#include <linux/iommu.h>
37#include <plat/iommu.h>
38#include <plat/iovmm.h>
39 37
40#include "ispstat.h" 38#include "ispstat.h"
41#include "ispccdc.h" 39#include "ispccdc.h"
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index aa9df9d71a7b..60e60aa64fb4 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -30,6 +30,7 @@
30#include <linux/device.h> 30#include <linux/device.h>
31#include <linux/dma-mapping.h> 31#include <linux/dma-mapping.h>
32#include <linux/mm.h> 32#include <linux/mm.h>
33#include <linux/omap-iommu.h>
33#include <linux/sched.h> 34#include <linux/sched.h>
34#include <linux/slab.h> 35#include <linux/slab.h>
35#include <media/v4l2-event.h> 36#include <media/v4l2-event.h>
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index b8640be692f1..e7939869bda7 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -26,6 +26,7 @@
26 */ 26 */
27 27
28#include <linux/dma-mapping.h> 28#include <linux/dma-mapping.h>
29#include <linux/omap-iommu.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
30#include <linux/uaccess.h> 31#include <linux/uaccess.h>
31 32
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 75cd309035f9..6e74346cc357 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -27,6 +27,7 @@
27#include <linux/clk.h> 27#include <linux/clk.h>
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/omap-iommu.h>
30#include <linux/pagemap.h> 31#include <linux/pagemap.h>
31#include <linux/scatterlist.h> 32#include <linux/scatterlist.h>
32#include <linux/sched.h> 33#include <linux/sched.h>
@@ -34,8 +35,6 @@
34#include <linux/vmalloc.h> 35#include <linux/vmalloc.h>
35#include <media/v4l2-dev.h> 36#include <media/v4l2-dev.h>
36#include <media/v4l2-ioctl.h> 37#include <media/v4l2-ioctl.h>
37#include <plat/iommu.h>
38#include <plat/iovmm.h>
39#include <plat/omap-pm.h> 38#include <plat/omap-pm.h>
40 39
41#include "ispvideo.h" 40#include "ispvideo.h"