-rw-r--r-- Documentation/DMA-API-HOWTO.txt            | 126
-rw-r--r-- Documentation/DMA-API.txt                  |  12
-rw-r--r-- arch/arm/include/asm/dma-mapping.h         |   1
-rw-r--r-- arch/arm/mach-omap2/devices.c              |   2
-rw-r--r-- arch/arm/mach-omap2/omap-iommu.c           | 167
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_44xx_data.c |   4
-rw-r--r-- arch/arm64/include/asm/dma-mapping.h       |   1
-rw-r--r-- arch/c6x/include/asm/dma-mapping.h         |   1
-rw-r--r-- arch/ia64/include/asm/dma-mapping.h        |   1
-rw-r--r-- arch/microblaze/include/asm/dma-mapping.h  |   2
-rw-r--r-- arch/mips/include/asm/dma-mapping.h        |   2
-rw-r--r-- arch/powerpc/include/asm/dma-mapping.h     |   1
-rw-r--r-- arch/sh/include/asm/dma-mapping.h          |   1
-rw-r--r-- arch/sparc/include/asm/dma-mapping.h       |   1
-rw-r--r-- arch/tile/include/asm/dma-mapping.h        |   1
-rw-r--r-- arch/x86/include/asm/dma-mapping.h         |   1
-rw-r--r-- drivers/iommu/amd_iommu.c                  | 196
-rw-r--r-- drivers/iommu/amd_iommu_types.h            |   1
-rw-r--r-- drivers/iommu/intel-iommu.c                |  31
-rw-r--r-- drivers/iommu/omap-iommu.c                 |  68
-rw-r--r-- drivers/iommu/omap-iommu.h                 |   3
-rw-r--r-- drivers/iommu/omap-iommu2.c                |  36
-rw-r--r-- drivers/iommu/tegra-gart.c                 |   2
-rw-r--r-- drivers/iommu/tegra-smmu.c                 |   6
-rw-r--r-- include/linux/dma-debug.h                  |   7
-rw-r--r-- include/linux/platform_data/iommu-omap.h   |   9
-rw-r--r-- lib/dma-debug.c                            |  66
27 files changed, 477 insertions(+), 272 deletions(-)
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index a0b6250add79..4a4fb295ceef 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -468,11 +468,46 @@ To map a single region, you do:
468 size_t size = buffer->len; 468 size_t size = buffer->len;
469 469
470 dma_handle = dma_map_single(dev, addr, size, direction); 470 dma_handle = dma_map_single(dev, addr, size, direction);
471 if (dma_mapping_error(dev, dma_handle)) {
472 /*
473 * reduce current DMA mapping usage,
474 * delay and try again later or
475 * reset driver.
476 */
477 goto map_error_handling;
478 }
471 479
472and to unmap it: 480and to unmap it:
473 481
474 dma_unmap_single(dev, dma_handle, size, direction); 482 dma_unmap_single(dev, dma_handle, size, direction);
475 483
484You should call dma_mapping_error() because dma_map_single() can fail and
485return an error. Not every DMA implementation supports the
486dma_mapping_error() interface, but it is good practice to call it regardless:
487it invokes the generic mapping-error check, so the mapping code works
488correctly on all DMA implementations without depending on the specifics of
489the underlying one. Using the returned address without checking for errors
490can result in failures ranging from panics to silent data corruption. A
491couple of examples of incorrect ways to check for errors, all of which make
492assumptions about the underlying DMA implementation, follow; these apply to
493dma_map_page() as well.
494
495Incorrect example 1:
496 dma_addr_t dma_handle;
497
498 dma_handle = dma_map_single(dev, addr, size, direction);
499 if ((dma_handle & 0xffff != 0) || (dma_handle >= 0x1000000)) {
500 goto map_error;
501 }
502
503Incorrect example 2:
504 dma_addr_t dma_handle;
505
506 dma_handle = dma_map_single(dev, addr, size, direction);
507 if (dma_handle == DMA_ERROR_CODE) {
508 goto map_error;
509 }
510
476You should call dma_unmap_single when the DMA activity is finished, e.g. 511You should call dma_unmap_single when the DMA activity is finished, e.g.
477from the interrupt which told you that the DMA transfer is done. 512from the interrupt which told you that the DMA transfer is done.
478 513
@@ -489,6 +524,14 @@ Specifically:
489 size_t size = buffer->len; 524 size_t size = buffer->len;
490 525
491 dma_handle = dma_map_page(dev, page, offset, size, direction); 526 dma_handle = dma_map_page(dev, page, offset, size, direction);
527 if (dma_mapping_error(dev, dma_handle)) {
528 /*
529 * reduce current DMA mapping usage,
530 * delay and try again later or
531 * reset driver.
532 */
533 goto map_error_handling;
534 }
492 535
493 ... 536 ...
494 537
@@ -496,6 +539,12 @@ Specifically:
496 539
497Here, "offset" means byte offset within the given page. 540Here, "offset" means byte offset within the given page.
498 541
542You should call dma_mapping_error() because dma_map_page() can fail and
543return an error, as outlined in the dma_map_single() discussion above.
544
545You should call dma_unmap_page when the DMA activity is finished, e.g.
546from the interrupt which told you that the DMA transfer is done.
547
499With scatterlists, you map a region gathered from several regions by: 548With scatterlists, you map a region gathered from several regions by:
500 549
501 int i, count = dma_map_sg(dev, sglist, nents, direction); 550 int i, count = dma_map_sg(dev, sglist, nents, direction);
@@ -578,6 +627,14 @@ to use the dma_sync_*() interfaces.
578 dma_addr_t mapping; 627 dma_addr_t mapping;
579 628
580 mapping = dma_map_single(cp->dev, buffer, len, DMA_FROM_DEVICE); 629 mapping = dma_map_single(cp->dev, buffer, len, DMA_FROM_DEVICE);
630 if (dma_mapping_error(cp->dev, mapping)) {
631 /*
632 * reduce current DMA mapping usage,
633 * delay and try again later or
634 * reset driver.
635 */
636 goto map_error_handling;
637 }
581 638
582 cp->rx_buf = buffer; 639 cp->rx_buf = buffer;
583 cp->rx_len = len; 640 cp->rx_len = len;
@@ -658,6 +715,75 @@ failure can be determined by:
658 * delay and try again later or 715 * delay and try again later or
659 * reset driver. 716 * reset driver.
660 */ 717 */
718 goto map_error_handling;
719 }
720
721- unmap pages that are already mapped when a mapping error occurs in the
722 middle of a multi-page mapping attempt. These examples apply to
723 dma_map_page() as well.
724
725Example 1:
726 dma_addr_t dma_handle1;
727 dma_addr_t dma_handle2;
728
729 dma_handle1 = dma_map_single(dev, addr, size, direction);
730 if (dma_mapping_error(dev, dma_handle1)) {
731 /*
732 * reduce current DMA mapping usage,
733 * delay and try again later or
734 * reset driver.
735 */
736 goto map_error_handling1;
737 }
738 dma_handle2 = dma_map_single(dev, addr, size, direction);
739 if (dma_mapping_error(dev, dma_handle2)) {
740 /*
741 * reduce current DMA mapping usage,
742 * delay and try again later or
743 * reset driver.
744 */
745 goto map_error_handling2;
746 }
747
748 ...
749
750 map_error_handling2:
751 dma_unmap_single(dev, dma_handle1, size, direction);
752 map_error_handling1:
753
754Example 2: (if buffers are allocated in a loop, unmap all mapped buffers
755 when a mapping error is detected in the middle)
756
757 dma_addr_t dma_addr;
758 dma_addr_t array[DMA_BUFFERS];
759 int save_index = 0;
760
761 for (i = 0; i < DMA_BUFFERS; i++) {
762
763 ...
764
765 dma_addr = dma_map_single(dev, addr, size, direction);
766 if (dma_mapping_error(dev, dma_addr)) {
767 /*
768 * reduce current DMA mapping usage,
769 * delay and try again later or
770 * reset driver.
771 */
772 goto map_error_handling;
773 }
774 array[i] = dma_addr;
775 save_index++;
776 }
777
778 ...
779
780 map_error_handling:
781
782 for (i = 0; i < save_index; i++) {
783
784 ...
785
786 dma_unmap_single(dev, array[i], size, direction);
661 } 787 }
662 788
663Networking drivers must call dev_kfree_skb to free the socket buffer 789Networking drivers must call dev_kfree_skb to free the socket buffer
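
Taken together, the HOWTO's guidance boils down to one portable pattern: check
every returned handle with dma_mapping_error() before using it, and unmap with
the same device, size and direction. A minimal sketch (the dev/buf/len names
and the error value are illustrative, not part of this patch):

	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		/* reduce mapping usage, retry later, or fail the request */
		return -ENOMEM;
	}

	/* ... start the DMA and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);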
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 66bd97a95f10..78a6c569d204 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -678,3 +678,15 @@ out of dma_debug_entries. These entries are preallocated at boot. The number
678of preallocated entries is defined per architecture. If it is too low for you 678of preallocated entries is defined per architecture. If it is too low for you
679boot with 'dma_debug_entries=<your_desired_number>' to overwrite the 679boot with 'dma_debug_entries=<your_desired_number>' to overwrite the
680architectural default. 680architectural default.
681
682void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
683
684The dma-debug interface debug_dma_mapping_error() helps debug drivers that
685fail to check for DMA mapping errors on addresses returned by the
686dma_map_single() and dma_map_page() interfaces. It clears a flag set by
687debug_dma_map_page() to indicate that dma_mapping_error() has been called by
688the driver. When the driver unmaps, debug_dma_unmap() checks the flag and,
689if it is still set, prints a warning message that includes the call trace
690leading up to the unmap. This interface can be called from
691dma_mapping_error() routines to enable DMA mapping error check debugging.
692
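In terms of the state machine added to lib/dma-debug.c later in this patch, a
driver that skips the check leaves its entry in MAP_ERR_NOT_CHECKED and gets
the new warning at unmap time. A minimal sketch of the failure mode (dev/buf
names are illustrative):

	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	/* debug_dma_map_page() marks the entry MAP_ERR_NOT_CHECKED */

	/* no dma_mapping_error() call here, so debug_dma_mapping_error()
	 * never flips the entry to MAP_ERR_CHECKED */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	/* check_unmap() now warns: "device driver failed to check map error" */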
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 67d06324e74a..5b579b951503 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -91,6 +91,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
91 */ 91 */
92static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 92static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
93{ 93{
94 debug_dma_mapping_error(dev, dma_addr);
94 return dma_addr == DMA_ERROR_CODE; 95 return dma_addr == DMA_ERROR_CODE;
95} 96}
96 97
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 4abb8b5e9bc0..5e304d0719a2 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -226,7 +226,7 @@ static struct platform_device omap3isp_device = {
226}; 226};
227 227
228static struct omap_iommu_arch_data omap3_isp_iommu = { 228static struct omap_iommu_arch_data omap3_isp_iommu = {
229 .name = "isp", 229 .name = "mmu_isp",
230}; 230};
231 231
232int omap3_init_camera(struct isp_platform_data *pdata) 232int omap3_init_camera(struct isp_platform_data *pdata)
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
index a6a4ff8744b7..7642fc4672c1 100644
--- a/arch/arm/mach-omap2/omap-iommu.c
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -12,153 +12,60 @@
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/err.h>
16#include <linux/slab.h>
15 17
16#include <linux/platform_data/iommu-omap.h> 18#include <linux/platform_data/iommu-omap.h>
19#include <plat/omap_hwmod.h>
20#include <plat/omap_device.h>
17 21
18#include "soc.h" 22static int __init omap_iommu_dev_init(struct omap_hwmod *oh, void *unused)
19#include "common.h"
20
21struct iommu_device {
22 resource_size_t base;
23 int irq;
24 struct iommu_platform_data pdata;
25 struct resource res[2];
26};
27static struct iommu_device *devices;
28static int num_iommu_devices;
29
30#ifdef CONFIG_ARCH_OMAP3
31static struct iommu_device omap3_devices[] = {
32 {
33 .base = 0x480bd400,
34 .irq = 24 + OMAP_INTC_START,
35 .pdata = {
36 .name = "isp",
37 .nr_tlb_entries = 8,
38 .clk_name = "cam_ick",
39 .da_start = 0x0,
40 .da_end = 0xFFFFF000,
41 },
42 },
43#if defined(CONFIG_OMAP_IOMMU_IVA2)
44 {
45 .base = 0x5d000000,
46 .irq = 28 + OMAP_INTC_START,
47 .pdata = {
48 .name = "iva2",
49 .nr_tlb_entries = 32,
50 .clk_name = "iva2_ck",
51 .da_start = 0x11000000,
52 .da_end = 0xFFFFF000,
53 },
54 },
55#endif
56};
57#define NR_OMAP3_IOMMU_DEVICES ARRAY_SIZE(omap3_devices)
58static struct platform_device *omap3_iommu_pdev[NR_OMAP3_IOMMU_DEVICES];
59#else
60#define omap3_devices NULL
61#define NR_OMAP3_IOMMU_DEVICES 0
62#define omap3_iommu_pdev NULL
63#endif
64
65#ifdef CONFIG_ARCH_OMAP4
66static struct iommu_device omap4_devices[] = {
67 {
68 .base = OMAP4_MMU1_BASE,
69 .irq = 100 + OMAP44XX_IRQ_GIC_START,
70 .pdata = {
71 .name = "ducati",
72 .nr_tlb_entries = 32,
73 .clk_name = "ipu_fck",
74 .da_start = 0x0,
75 .da_end = 0xFFFFF000,
76 },
77 },
78 {
79 .base = OMAP4_MMU2_BASE,
80 .irq = 28 + OMAP44XX_IRQ_GIC_START,
81 .pdata = {
82 .name = "tesla",
83 .nr_tlb_entries = 32,
84 .clk_name = "dsp_fck",
85 .da_start = 0x0,
86 .da_end = 0xFFFFF000,
87 },
88 },
89};
90#define NR_OMAP4_IOMMU_DEVICES ARRAY_SIZE(omap4_devices)
91static struct platform_device *omap4_iommu_pdev[NR_OMAP4_IOMMU_DEVICES];
92#else
93#define omap4_devices NULL
94#define NR_OMAP4_IOMMU_DEVICES 0
95#define omap4_iommu_pdev NULL
96#endif
97
98static struct platform_device **omap_iommu_pdev;
99
100static int __init omap_iommu_init(void)
101{ 23{
102 int i, err; 24 struct platform_device *pdev;
103 struct resource res[] = { 25 struct iommu_platform_data *pdata;
104 { .flags = IORESOURCE_MEM }, 26 struct omap_mmu_dev_attr *a = (struct omap_mmu_dev_attr *)oh->dev_attr;
105 { .flags = IORESOURCE_IRQ }, 27 static int i;
106 }; 28
29 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
30 if (!pdata)
31 return -ENOMEM;
32
33 pdata->name = oh->name;
34 pdata->nr_tlb_entries = a->nr_tlb_entries;
35 pdata->da_start = a->da_start;
36 pdata->da_end = a->da_end;
37
38 if (oh->rst_lines_cnt == 1) {
39 pdata->reset_name = oh->rst_lines->name;
40 pdata->assert_reset = omap_device_assert_hardreset;
41 pdata->deassert_reset = omap_device_deassert_hardreset;
42 }
107 43
108 if (cpu_is_omap34xx()) { 44 pdev = omap_device_build("omap-iommu", i, oh, pdata, sizeof(*pdata),
109 devices = omap3_devices; 45 NULL, 0, 0);
110 omap_iommu_pdev = omap3_iommu_pdev;
111 num_iommu_devices = NR_OMAP3_IOMMU_DEVICES;
112 } else if (cpu_is_omap44xx()) {
113 devices = omap4_devices;
114 omap_iommu_pdev = omap4_iommu_pdev;
115 num_iommu_devices = NR_OMAP4_IOMMU_DEVICES;
116 } else
117 return -ENODEV;
118 46
119 for (i = 0; i < num_iommu_devices; i++) { 47 kfree(pdata);
120 struct platform_device *pdev;
121 const struct iommu_device *d = &devices[i];
122 48
123 pdev = platform_device_alloc("omap-iommu", i); 49 if (IS_ERR(pdev)) {
124 if (!pdev) { 50 pr_err("%s: device build err: %ld\n", __func__, PTR_ERR(pdev));
125 err = -ENOMEM; 51 return PTR_ERR(pdev);
126 goto err_out; 52 }
127 }
128 53
129 res[0].start = d->base; 54 i++;
130 res[0].end = d->base + MMU_REG_SIZE - 1;
131 res[1].start = res[1].end = d->irq;
132 55
133 err = platform_device_add_resources(pdev, res,
134 ARRAY_SIZE(res));
135 if (err)
136 goto err_out;
137 err = platform_device_add_data(pdev, &d->pdata,
138 sizeof(d->pdata));
139 if (err)
140 goto err_out;
141 err = platform_device_add(pdev);
142 if (err)
143 goto err_out;
144 omap_iommu_pdev[i] = pdev;
145 }
146 return 0; 56 return 0;
57}
147 58
148err_out: 59static int __init omap_iommu_init(void)
149 while (i--) 60{
150 platform_device_put(omap_iommu_pdev[i]); 61 return omap_hwmod_for_each_by_class("mmu", omap_iommu_dev_init, NULL);
151 return err;
152} 62}
153/* must be ready before omap3isp is probed */ 63/* must be ready before omap3isp is probed */
154subsys_initcall(omap_iommu_init); 64subsys_initcall(omap_iommu_init);
155 65
156static void __exit omap_iommu_exit(void) 66static void __exit omap_iommu_exit(void)
157{ 67{
158 int i; 68 /* Do nothing */
159
160 for (i = 0; i < num_iommu_devices; i++)
161 platform_device_unregister(omap_iommu_pdev[i]);
162} 69}
163module_exit(omap_iommu_exit); 70module_exit(omap_iommu_exit);
164 71
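The rewrite above stands or falls with omap_hwmod_for_each_by_class(), which
discovers every MMU instance from hwmod data instead of the static tables it
deletes. Roughly, the helper walks the registered hwmod list and invokes the
callback for each hwmod of the named class, stopping at the first non-zero
return; a sketch of the assumed semantics, not the real implementation:

	int omap_hwmod_for_each_by_class(const char *classname,
					 int (*fn)(struct omap_hwmod *oh,
						   void *user),
					 void *user)
	{
		struct omap_hwmod *oh;
		int ret = 0;

		/* omap_hwmod_list: the list every hwmod registers on */
		list_for_each_entry(oh, &omap_hwmod_list, node) {
			if (strcmp(oh->class->name, classname))
				continue;
			ret = fn(oh, user);
			if (ret)
				break;
		}
		return ret;
	}
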
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 272b0178dba6..f9fab942d5ba 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -653,7 +653,7 @@ static struct omap_hwmod omap44xx_dsp_hwmod = {
653 .mpu_irqs = omap44xx_dsp_irqs, 653 .mpu_irqs = omap44xx_dsp_irqs,
654 .rst_lines = omap44xx_dsp_resets, 654 .rst_lines = omap44xx_dsp_resets,
655 .rst_lines_cnt = ARRAY_SIZE(omap44xx_dsp_resets), 655 .rst_lines_cnt = ARRAY_SIZE(omap44xx_dsp_resets),
656 .main_clk = "dsp_fck", 656 .main_clk = "dpll_iva_m4x2_ck",
657 .prcm = { 657 .prcm = {
658 .omap4 = { 658 .omap4 = {
659 .clkctrl_offs = OMAP4_CM_TESLA_TESLA_CLKCTRL_OFFSET, 659 .clkctrl_offs = OMAP4_CM_TESLA_TESLA_CLKCTRL_OFFSET,
@@ -1679,7 +1679,7 @@ static struct omap_hwmod omap44xx_ipu_hwmod = {
1679 .mpu_irqs = omap44xx_ipu_irqs, 1679 .mpu_irqs = omap44xx_ipu_irqs,
1680 .rst_lines = omap44xx_ipu_resets, 1680 .rst_lines = omap44xx_ipu_resets,
1681 .rst_lines_cnt = ARRAY_SIZE(omap44xx_ipu_resets), 1681 .rst_lines_cnt = ARRAY_SIZE(omap44xx_ipu_resets),
1682 .main_clk = "ipu_fck", 1682 .main_clk = "ducati_clk_mux_ck",
1683 .prcm = { 1683 .prcm = {
1684 .omap4 = { 1684 .omap4 = {
1685 .clkctrl_offs = OMAP4_CM_DUCATI_DUCATI_CLKCTRL_OFFSET, 1685 .clkctrl_offs = OMAP4_CM_DUCATI_DUCATI_CLKCTRL_OFFSET,
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 538f4b44db5d..994776894198 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -50,6 +50,7 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
50static inline int dma_mapping_error(struct device *dev, dma_addr_t dev_addr) 50static inline int dma_mapping_error(struct device *dev, dma_addr_t dev_addr)
51{ 51{
52 struct dma_map_ops *ops = get_dma_ops(dev); 52 struct dma_map_ops *ops = get_dma_ops(dev);
53 debug_dma_mapping_error(dev, dev_addr);
53 return ops->mapping_error(dev, dev_addr); 54 return ops->mapping_error(dev, dev_addr);
54} 55}
55 56
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
index 03579fd99dba..3c694065030f 100644
--- a/arch/c6x/include/asm/dma-mapping.h
+++ b/arch/c6x/include/asm/dma-mapping.h
@@ -32,6 +32,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
32 */ 32 */
33static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 33static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
34{ 34{
35 debug_dma_mapping_error(dev, dma_addr);
35 return dma_addr == ~0; 36 return dma_addr == ~0;
36} 37}
37 38
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 4f5e8148440d..cf3ab7e784b5 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -58,6 +58,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
58static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr) 58static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
59{ 59{
60 struct dma_map_ops *ops = platform_dma_get_ops(dev); 60 struct dma_map_ops *ops = platform_dma_get_ops(dev);
61 debug_dma_mapping_error(dev, daddr);
61 return ops->mapping_error(dev, daddr); 62 return ops->mapping_error(dev, daddr);
62} 63}
63 64
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 01d228286cb0..46460f1c49c4 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -114,6 +114,8 @@ static inline void __dma_sync(unsigned long paddr,
114static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 114static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
115{ 115{
116 struct dma_map_ops *ops = get_dma_ops(dev); 116 struct dma_map_ops *ops = get_dma_ops(dev);
117
118 debug_dma_mapping_error(dev, dma_addr);
117 if (ops->mapping_error) 119 if (ops->mapping_error)
118 return ops->mapping_error(dev, dma_addr); 120 return ops->mapping_error(dev, dma_addr);
119 121
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index be39a12901c6..006b43e38a9c 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -40,6 +40,8 @@ static inline int dma_supported(struct device *dev, u64 mask)
40static inline int dma_mapping_error(struct device *dev, u64 mask) 40static inline int dma_mapping_error(struct device *dev, u64 mask)
41{ 41{
42 struct dma_map_ops *ops = get_dma_ops(dev); 42 struct dma_map_ops *ops = get_dma_ops(dev);
43
44 debug_dma_mapping_error(dev, mask);
43 return ops->mapping_error(dev, mask); 45 return ops->mapping_error(dev, mask);
44} 46}
45 47
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 78160874809a..e27e9ad6818e 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -172,6 +172,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
172{ 172{
173 struct dma_map_ops *dma_ops = get_dma_ops(dev); 173 struct dma_map_ops *dma_ops = get_dma_ops(dev);
174 174
175 debug_dma_mapping_error(dev, dma_addr);
175 if (dma_ops->mapping_error) 176 if (dma_ops->mapping_error)
176 return dma_ops->mapping_error(dev, dma_addr); 177 return dma_ops->mapping_error(dev, dma_addr);
177 178
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index 8bd965e00a15..b437f2c780b8 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -46,6 +46,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
46{ 46{
47 struct dma_map_ops *ops = get_dma_ops(dev); 47 struct dma_map_ops *ops = get_dma_ops(dev);
48 48
49 debug_dma_mapping_error(dev, dma_addr);
49 if (ops->mapping_error) 50 if (ops->mapping_error)
50 return ops->mapping_error(dev, dma_addr); 51 return ops->mapping_error(dev, dma_addr);
51 52
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 8493fd3c7ba5..05fe53f5346e 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -59,6 +59,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
59 59
60static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 60static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
61{ 61{
62 debug_dma_mapping_error(dev, dma_addr);
62 return (dma_addr == DMA_ERROR_CODE); 63 return (dma_addr == DMA_ERROR_CODE);
63} 64}
64 65
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index 4b6247d1a315..f2ff191376b4 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -72,6 +72,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
72static inline int 72static inline int
73dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 73dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
74{ 74{
75 debug_dma_mapping_error(dev, dma_addr);
75 return get_dma_ops(dev)->mapping_error(dev, dma_addr); 76 return get_dma_ops(dev)->mapping_error(dev, dma_addr);
76} 77}
77 78
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index f7b4c7903e7e..808dae63eeea 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -47,6 +47,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
47static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 47static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
48{ 48{
49 struct dma_map_ops *ops = get_dma_ops(dev); 49 struct dma_map_ops *ops = get_dma_ops(dev);
50 debug_dma_mapping_error(dev, dma_addr);
50 if (ops->mapping_error) 51 if (ops->mapping_error)
51 return ops->mapping_error(dev, dma_addr); 52 return ops->mapping_error(dev, dma_addr);
52 53
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 55074cba20eb..c1c74e030a58 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -57,17 +57,9 @@
57 * physically contiguous memory regions it is mapping into page sizes 57 * physically contiguous memory regions it is mapping into page sizes
58 * that we support. 58 * that we support.
59 * 59 *
60 * Traditionally the IOMMU core just handed us the mappings directly, 60 * 512GB pages are not supported due to a hardware bug
61 * after making sure the size is an order of a 4KiB page and that the
62 * mapping has natural alignment.
63 *
64 * To retain this behavior, we currently advertise that we support
65 * all page sizes that are an order of 4KiB.
66 *
67 * If at some point we'd like to utilize the IOMMU core's new behavior,
68 * we could change this to advertise the real page sizes we support.
69 */ 61 */
70#define AMD_IOMMU_PGSIZES (~0xFFFUL) 62#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
71 63
72static DEFINE_RWLOCK(amd_iommu_devtable_lock); 64static DEFINE_RWLOCK(amd_iommu_devtable_lock);
73 65
@@ -140,6 +132,9 @@ static void free_dev_data(struct iommu_dev_data *dev_data)
140 list_del(&dev_data->dev_data_list); 132 list_del(&dev_data->dev_data_list);
141 spin_unlock_irqrestore(&dev_data_list_lock, flags); 133 spin_unlock_irqrestore(&dev_data_list_lock, flags);
142 134
135 if (dev_data->group)
136 iommu_group_put(dev_data->group);
137
143 kfree(dev_data); 138 kfree(dev_data);
144} 139}
145 140
@@ -274,41 +269,23 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
274 *from = to; 269 *from = to;
275} 270}
276 271
277#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) 272static struct pci_bus *find_hosted_bus(struct pci_bus *bus)
278
279static int iommu_init_device(struct device *dev)
280{ 273{
281 struct pci_dev *dma_pdev = NULL, *pdev = to_pci_dev(dev); 274 while (!bus->self) {
282 struct iommu_dev_data *dev_data; 275 if (!pci_is_root_bus(bus))
283 struct iommu_group *group; 276 bus = bus->parent;
284 u16 alias; 277 else
285 int ret; 278 return ERR_PTR(-ENODEV);
286 279 }
287 if (dev->archdata.iommu)
288 return 0;
289
290 dev_data = find_dev_data(get_device_id(dev));
291 if (!dev_data)
292 return -ENOMEM;
293
294 alias = amd_iommu_alias_table[dev_data->devid];
295 if (alias != dev_data->devid) {
296 struct iommu_dev_data *alias_data;
297 280
298 alias_data = find_dev_data(alias); 281 return bus;
299 if (alias_data == NULL) { 282}
300 pr_err("AMD-Vi: Warning: Unhandled device %s\n",
301 dev_name(dev));
302 free_dev_data(dev_data);
303 return -ENOTSUPP;
304 }
305 dev_data->alias_data = alias_data;
306 283
307 dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff); 284#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
308 }
309 285
310 if (dma_pdev == NULL) 286static struct pci_dev *get_isolation_root(struct pci_dev *pdev)
311 dma_pdev = pci_dev_get(pdev); 287{
288 struct pci_dev *dma_pdev = pdev;
312 289
313 /* Account for quirked devices */ 290 /* Account for quirked devices */
314 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); 291 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
@@ -330,14 +307,9 @@ static int iommu_init_device(struct device *dev)
330 * Finding the next device may require skipping virtual buses. 307 * Finding the next device may require skipping virtual buses.
331 */ 308 */
332 while (!pci_is_root_bus(dma_pdev->bus)) { 309 while (!pci_is_root_bus(dma_pdev->bus)) {
333 struct pci_bus *bus = dma_pdev->bus; 310 struct pci_bus *bus = find_hosted_bus(dma_pdev->bus);
334 311 if (IS_ERR(bus))
335 while (!bus->self) { 312 break;
336 if (!pci_is_root_bus(bus))
337 bus = bus->parent;
338 else
339 goto root_bus;
340 }
341 313
342 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) 314 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
343 break; 315 break;
@@ -345,19 +317,137 @@ static int iommu_init_device(struct device *dev)
345 swap_pci_ref(&dma_pdev, pci_dev_get(bus->self)); 317 swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
346 } 318 }
347 319
348root_bus: 320 return dma_pdev;
349 group = iommu_group_get(&dma_pdev->dev); 321}
350 pci_dev_put(dma_pdev); 322
323static int use_pdev_iommu_group(struct pci_dev *pdev, struct device *dev)
324{
325 struct iommu_group *group = iommu_group_get(&pdev->dev);
326 int ret;
327
351 if (!group) { 328 if (!group) {
352 group = iommu_group_alloc(); 329 group = iommu_group_alloc();
353 if (IS_ERR(group)) 330 if (IS_ERR(group))
354 return PTR_ERR(group); 331 return PTR_ERR(group);
332
333 WARN_ON(&pdev->dev != dev);
355 } 334 }
356 335
357 ret = iommu_group_add_device(group, dev); 336 ret = iommu_group_add_device(group, dev);
358
359 iommu_group_put(group); 337 iommu_group_put(group);
338 return ret;
339}
340
341static int use_dev_data_iommu_group(struct iommu_dev_data *dev_data,
342 struct device *dev)
343{
344 if (!dev_data->group) {
345 struct iommu_group *group = iommu_group_alloc();
346 if (IS_ERR(group))
347 return PTR_ERR(group);
348
349 dev_data->group = group;
350 }
351
352 return iommu_group_add_device(dev_data->group, dev);
353}
354
355static int init_iommu_group(struct device *dev)
356{
357 struct iommu_dev_data *dev_data;
358 struct iommu_group *group;
359 struct pci_dev *dma_pdev;
360 int ret;
361
362 group = iommu_group_get(dev);
363 if (group) {
364 iommu_group_put(group);
365 return 0;
366 }
367
368 dev_data = find_dev_data(get_device_id(dev));
369 if (!dev_data)
370 return -ENOMEM;
371
372 if (dev_data->alias_data) {
373 u16 alias;
374 struct pci_bus *bus;
375
376 if (dev_data->alias_data->group)
377 goto use_group;
378
379 /*
380 * If the alias device exists, it's effectively just a first
381 * level quirk for finding the DMA source.
382 */
383 alias = amd_iommu_alias_table[dev_data->devid];
384 dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
385 if (dma_pdev) {
386 dma_pdev = get_isolation_root(dma_pdev);
387 goto use_pdev;
388 }
389
390 /*
391 * If the alias is virtual, try to find a parent device
392 * and test whether the IOMMU group is actually rooted above
393 * the alias. Be careful to also test the parent device if
394 * we think the alias is the root of the group.
395 */
396 bus = pci_find_bus(0, alias >> 8);
397 if (!bus)
398 goto use_group;
399
400 bus = find_hosted_bus(bus);
401 if (IS_ERR(bus) || !bus->self)
402 goto use_group;
403
404 dma_pdev = get_isolation_root(pci_dev_get(bus->self));
405 if (dma_pdev != bus->self || (dma_pdev->multifunction &&
406 !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)))
407 goto use_pdev;
408
409 pci_dev_put(dma_pdev);
410 goto use_group;
411 }
412
413 dma_pdev = get_isolation_root(pci_dev_get(to_pci_dev(dev)));
414use_pdev:
415 ret = use_pdev_iommu_group(dma_pdev, dev);
416 pci_dev_put(dma_pdev);
417 return ret;
418use_group:
419 return use_dev_data_iommu_group(dev_data->alias_data, dev);
420}
421
422static int iommu_init_device(struct device *dev)
423{
424 struct pci_dev *pdev = to_pci_dev(dev);
425 struct iommu_dev_data *dev_data;
426 u16 alias;
427 int ret;
428
429 if (dev->archdata.iommu)
430 return 0;
431
432 dev_data = find_dev_data(get_device_id(dev));
433 if (!dev_data)
434 return -ENOMEM;
435
436 alias = amd_iommu_alias_table[dev_data->devid];
437 if (alias != dev_data->devid) {
438 struct iommu_dev_data *alias_data;
439
440 alias_data = find_dev_data(alias);
441 if (alias_data == NULL) {
442 pr_err("AMD-Vi: Warning: Unhandled device %s\n",
443 dev_name(dev));
444 free_dev_data(dev_data);
445 return -ENOTSUPP;
446 }
447 dev_data->alias_data = alias_data;
448 }
360 449
450 ret = init_iommu_group(dev);
361 if (ret) 451 if (ret)
362 return ret; 452 return ret;
363 453
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index c9aa3d079ff0..e38ab438bb34 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -426,6 +426,7 @@ struct iommu_dev_data {
426 struct iommu_dev_data *alias_data;/* The alias dev_data */ 426 struct iommu_dev_data *alias_data;/* The alias dev_data */
427 struct protection_domain *domain; /* Domain the device is bound to */ 427 struct protection_domain *domain; /* Domain the device is bound to */
428 atomic_t bind; /* Domain attach reference count */ 428 atomic_t bind; /* Domain attach reference count */
429 struct iommu_group *group; /* IOMMU group for virtual aliases */
429 u16 devid; /* PCI Device ID */ 430 u16 devid; /* PCI Device ID */
430 bool iommu_v2; /* Device can make use of IOMMUv2 */ 431 bool iommu_v2; /* Device can make use of IOMMUv2 */
431 bool passthrough; /* Default for device is pt_domain */ 432 bool passthrough; /* Default for device is pt_domain */
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 9476c1b96090..c2c07a4a7f21 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2327,8 +2327,39 @@ static int domain_add_dev_info(struct dmar_domain *domain,
2327 return 0; 2327 return 0;
2328} 2328}
2329 2329
2330static bool device_has_rmrr(struct pci_dev *dev)
2331{
2332 struct dmar_rmrr_unit *rmrr;
2333 int i;
2334
2335 for_each_rmrr_units(rmrr) {
2336 for (i = 0; i < rmrr->devices_cnt; i++) {
2337 /*
2338 * Return TRUE if this RMRR contains the device that
2339 * is passed in.
2340 */
2341 if (rmrr->devices[i] == dev)
2342 return true;
2343 }
2344 }
2345 return false;
2346}
2347
2330static int iommu_should_identity_map(struct pci_dev *pdev, int startup) 2348static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2331{ 2349{
2350
2351 /*
2352 * We want to prevent any device associated with an RMRR from
2353 * getting placed into the SI Domain. This is done because
2354 * problems exist when devices are moved in and out of domains
2355 * and their respective RMRR info is lost. We exempt USB devices
2356 * from this process due to their usage of RMRRs that are known
2357 * to not be needed after BIOS hand-off to OS.
2358 */
2359 if (device_has_rmrr(pdev) &&
2360 (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
2361 return 0;
2362
2332 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) 2363 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2333 return 1; 2364 return 1;
2334 2365
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index badc17c2bcb4..18108c1405e2 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -16,13 +16,13 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/ioport.h> 18#include <linux/ioport.h>
19#include <linux/clk.h>
20#include <linux/platform_device.h> 19#include <linux/platform_device.h>
21#include <linux/iommu.h> 20#include <linux/iommu.h>
22#include <linux/omap-iommu.h> 21#include <linux/omap-iommu.h>
23#include <linux/mutex.h> 22#include <linux/mutex.h>
24#include <linux/spinlock.h> 23#include <linux/spinlock.h>
25#include <linux/io.h> 24#include <linux/io.h>
25#include <linux/pm_runtime.h>
26 26
27#include <asm/cacheflush.h> 27#include <asm/cacheflush.h>
28 28
@@ -143,31 +143,44 @@ EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
143static int iommu_enable(struct omap_iommu *obj) 143static int iommu_enable(struct omap_iommu *obj)
144{ 144{
145 int err; 145 int err;
146 struct platform_device *pdev = to_platform_device(obj->dev);
147 struct iommu_platform_data *pdata = pdev->dev.platform_data;
146 148
147 if (!obj) 149 if (!obj || !pdata)
148 return -EINVAL; 150 return -EINVAL;
149 151
150 if (!arch_iommu) 152 if (!arch_iommu)
151 return -ENODEV; 153 return -ENODEV;
152 154
153 clk_enable(obj->clk); 155 if (pdata->deassert_reset) {
156 err = pdata->deassert_reset(pdev, pdata->reset_name);
157 if (err) {
158 dev_err(obj->dev, "deassert_reset failed: %d\n", err);
159 return err;
160 }
161 }
162
163 pm_runtime_get_sync(obj->dev);
154 164
155 err = arch_iommu->enable(obj); 165 err = arch_iommu->enable(obj);
156 166
157 clk_disable(obj->clk);
158 return err; 167 return err;
159} 168}
160 169
161static void iommu_disable(struct omap_iommu *obj) 170static void iommu_disable(struct omap_iommu *obj)
162{ 171{
163 if (!obj) 172 struct platform_device *pdev = to_platform_device(obj->dev);
164 return; 173 struct iommu_platform_data *pdata = pdev->dev.platform_data;
165 174
166 clk_enable(obj->clk); 175 if (!obj || !pdata)
176 return;
167 177
168 arch_iommu->disable(obj); 178 arch_iommu->disable(obj);
169 179
170 clk_disable(obj->clk); 180 pm_runtime_put_sync(obj->dev);
181
182 if (pdata->assert_reset)
183 pdata->assert_reset(pdev, pdata->reset_name);
171} 184}
172 185
173/* 186/*
@@ -290,7 +303,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
290 if (!obj || !obj->nr_tlb_entries || !e) 303 if (!obj || !obj->nr_tlb_entries || !e)
291 return -EINVAL; 304 return -EINVAL;
292 305
293 clk_enable(obj->clk); 306 pm_runtime_get_sync(obj->dev);
294 307
295 iotlb_lock_get(obj, &l); 308 iotlb_lock_get(obj, &l);
296 if (l.base == obj->nr_tlb_entries) { 309 if (l.base == obj->nr_tlb_entries) {
@@ -320,7 +333,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
320 333
321 cr = iotlb_alloc_cr(obj, e); 334 cr = iotlb_alloc_cr(obj, e);
322 if (IS_ERR(cr)) { 335 if (IS_ERR(cr)) {
323 clk_disable(obj->clk); 336 pm_runtime_put_sync(obj->dev);
324 return PTR_ERR(cr); 337 return PTR_ERR(cr);
325 } 338 }
326 339
@@ -334,7 +347,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
334 l.vict = l.base; 347 l.vict = l.base;
335 iotlb_lock_set(obj, &l); 348 iotlb_lock_set(obj, &l);
336out: 349out:
337 clk_disable(obj->clk); 350 pm_runtime_put_sync(obj->dev);
338 return err; 351 return err;
339} 352}
340 353
@@ -364,7 +377,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
364 int i; 377 int i;
365 struct cr_regs cr; 378 struct cr_regs cr;
366 379
367 clk_enable(obj->clk); 380 pm_runtime_get_sync(obj->dev);
368 381
369 for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) { 382 for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
370 u32 start; 383 u32 start;
@@ -383,7 +396,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
383 iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); 396 iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
384 } 397 }
385 } 398 }
386 clk_disable(obj->clk); 399 pm_runtime_put_sync(obj->dev);
387 400
388 if (i == obj->nr_tlb_entries) 401 if (i == obj->nr_tlb_entries)
389 dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da); 402 dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
@@ -397,7 +410,7 @@ static void flush_iotlb_all(struct omap_iommu *obj)
397{ 410{
398 struct iotlb_lock l; 411 struct iotlb_lock l;
399 412
400 clk_enable(obj->clk); 413 pm_runtime_get_sync(obj->dev);
401 414
402 l.base = 0; 415 l.base = 0;
403 l.vict = 0; 416 l.vict = 0;
@@ -405,7 +418,7 @@ static void flush_iotlb_all(struct omap_iommu *obj)
405 418
406 iommu_write_reg(obj, 1, MMU_GFLUSH); 419 iommu_write_reg(obj, 1, MMU_GFLUSH);
407 420
408 clk_disable(obj->clk); 421 pm_runtime_put_sync(obj->dev);
409} 422}
410 423
411#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE) 424#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
@@ -415,11 +428,11 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
415 if (!obj || !buf) 428 if (!obj || !buf)
416 return -EINVAL; 429 return -EINVAL;
417 430
418 clk_enable(obj->clk); 431 pm_runtime_get_sync(obj->dev);
419 432
420 bytes = arch_iommu->dump_ctx(obj, buf, bytes); 433 bytes = arch_iommu->dump_ctx(obj, buf, bytes);
421 434
422 clk_disable(obj->clk); 435 pm_runtime_put_sync(obj->dev);
423 436
424 return bytes; 437 return bytes;
425} 438}
@@ -433,7 +446,7 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
433 struct cr_regs tmp; 446 struct cr_regs tmp;
434 struct cr_regs *p = crs; 447 struct cr_regs *p = crs;
435 448
436 clk_enable(obj->clk); 449 pm_runtime_get_sync(obj->dev);
437 iotlb_lock_get(obj, &saved); 450 iotlb_lock_get(obj, &saved);
438 451
439 for_each_iotlb_cr(obj, num, i, tmp) { 452 for_each_iotlb_cr(obj, num, i, tmp) {
@@ -443,7 +456,7 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
443 } 456 }
444 457
445 iotlb_lock_set(obj, &saved); 458 iotlb_lock_set(obj, &saved);
446 clk_disable(obj->clk); 459 pm_runtime_put_sync(obj->dev);
447 460
448 return p - crs; 461 return p - crs;
449} 462}
@@ -807,9 +820,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
807 if (!obj->refcount) 820 if (!obj->refcount)
808 return IRQ_NONE; 821 return IRQ_NONE;
809 822
810 clk_enable(obj->clk);
811 errs = iommu_report_fault(obj, &da); 823 errs = iommu_report_fault(obj, &da);
812 clk_disable(obj->clk);
813 if (errs == 0) 824 if (errs == 0)
814 return IRQ_HANDLED; 825 return IRQ_HANDLED;
815 826
@@ -931,17 +942,10 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
931 struct resource *res; 942 struct resource *res;
932 struct iommu_platform_data *pdata = pdev->dev.platform_data; 943 struct iommu_platform_data *pdata = pdev->dev.platform_data;
933 944
934 if (pdev->num_resources != 2)
935 return -EINVAL;
936
937 obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); 945 obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
938 if (!obj) 946 if (!obj)
939 return -ENOMEM; 947 return -ENOMEM;
940 948
941 obj->clk = clk_get(&pdev->dev, pdata->clk_name);
942 if (IS_ERR(obj->clk))
943 goto err_clk;
944
945 obj->nr_tlb_entries = pdata->nr_tlb_entries; 949 obj->nr_tlb_entries = pdata->nr_tlb_entries;
946 obj->name = pdata->name; 950 obj->name = pdata->name;
947 obj->dev = &pdev->dev; 951 obj->dev = &pdev->dev;
@@ -984,6 +988,9 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
984 goto err_irq; 988 goto err_irq;
985 platform_set_drvdata(pdev, obj); 989 platform_set_drvdata(pdev, obj);
986 990
991 pm_runtime_irq_safe(obj->dev);
992 pm_runtime_enable(obj->dev);
993
987 dev_info(&pdev->dev, "%s registered\n", obj->name); 994 dev_info(&pdev->dev, "%s registered\n", obj->name);
988 return 0; 995 return 0;
989 996
@@ -992,8 +999,6 @@ err_irq:
992err_ioremap: 999err_ioremap:
993 release_mem_region(res->start, resource_size(res)); 1000 release_mem_region(res->start, resource_size(res));
994err_mem: 1001err_mem:
995 clk_put(obj->clk);
996err_clk:
997 kfree(obj); 1002 kfree(obj);
998 return err; 1003 return err;
999} 1004}
@@ -1014,7 +1019,8 @@ static int __devexit omap_iommu_remove(struct platform_device *pdev)
1014 release_mem_region(res->start, resource_size(res)); 1019 release_mem_region(res->start, resource_size(res));
1015 iounmap(obj->regbase); 1020 iounmap(obj->regbase);
1016 1021
1017 clk_put(obj->clk); 1022 pm_runtime_disable(obj->dev);
1023
1018 dev_info(&pdev->dev, "%s removed\n", obj->name); 1024 dev_info(&pdev->dev, "%s removed\n", obj->name);
1019 kfree(obj); 1025 kfree(obj);
1020 return 0; 1026 return 0;
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index 2b5f3c04d167..120084206602 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -29,7 +29,6 @@ struct iotlb_entry {
29struct omap_iommu { 29struct omap_iommu {
30 const char *name; 30 const char *name;
31 struct module *owner; 31 struct module *owner;
32 struct clk *clk;
33 void __iomem *regbase; 32 void __iomem *regbase;
34 struct device *dev; 33 struct device *dev;
35 void *isr_priv; 34 void *isr_priv;
@@ -116,8 +115,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
116 * MMU Register offsets 115 * MMU Register offsets
117 */ 116 */
118#define MMU_REVISION 0x00 117#define MMU_REVISION 0x00
119#define MMU_SYSCONFIG 0x10
120#define MMU_SYSSTATUS 0x14
121#define MMU_IRQSTATUS 0x18 118#define MMU_IRQSTATUS 0x18
122#define MMU_IRQENABLE 0x1c 119#define MMU_IRQENABLE 0x1c
123#define MMU_WALKING_ST 0x40 120#define MMU_WALKING_ST 0x40
diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c
index c02020292377..d745094a69dd 100644
--- a/drivers/iommu/omap-iommu2.c
+++ b/drivers/iommu/omap-iommu2.c
@@ -28,19 +28,6 @@
28 */ 28 */
29#define IOMMU_ARCH_VERSION 0x00000011 29#define IOMMU_ARCH_VERSION 0x00000011
30 30
31/* SYSCONF */
32#define MMU_SYS_IDLE_SHIFT 3
33#define MMU_SYS_IDLE_FORCE (0 << MMU_SYS_IDLE_SHIFT)
34#define MMU_SYS_IDLE_NONE (1 << MMU_SYS_IDLE_SHIFT)
35#define MMU_SYS_IDLE_SMART (2 << MMU_SYS_IDLE_SHIFT)
36#define MMU_SYS_IDLE_MASK (3 << MMU_SYS_IDLE_SHIFT)
37
38#define MMU_SYS_SOFTRESET (1 << 1)
39#define MMU_SYS_AUTOIDLE 1
40
41/* SYSSTATUS */
42#define MMU_SYS_RESETDONE 1
43
44/* IRQSTATUS & IRQENABLE */ 31/* IRQSTATUS & IRQENABLE */
45#define MMU_IRQ_MULTIHITFAULT (1 << 4) 32#define MMU_IRQ_MULTIHITFAULT (1 << 4)
46#define MMU_IRQ_TABLEWALKFAULT (1 << 3) 33#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
@@ -97,7 +84,6 @@ static void __iommu_set_twl(struct omap_iommu *obj, bool on)
97static int omap2_iommu_enable(struct omap_iommu *obj) 84static int omap2_iommu_enable(struct omap_iommu *obj)
98{ 85{
99 u32 l, pa; 86 u32 l, pa;
100 unsigned long timeout;
101 87
102 if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K)) 88 if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
103 return -EINVAL; 89 return -EINVAL;
@@ -106,29 +92,10 @@ static int omap2_iommu_enable(struct omap_iommu *obj)
106 if (!IS_ALIGNED(pa, SZ_16K)) 92 if (!IS_ALIGNED(pa, SZ_16K))
107 return -EINVAL; 93 return -EINVAL;
108 94
109 iommu_write_reg(obj, MMU_SYS_SOFTRESET, MMU_SYSCONFIG);
110
111 timeout = jiffies + msecs_to_jiffies(20);
112 do {
113 l = iommu_read_reg(obj, MMU_SYSSTATUS);
114 if (l & MMU_SYS_RESETDONE)
115 break;
116 } while (!time_after(jiffies, timeout));
117
118 if (!(l & MMU_SYS_RESETDONE)) {
119 dev_err(obj->dev, "can't take mmu out of reset\n");
120 return -ENODEV;
121 }
122
123 l = iommu_read_reg(obj, MMU_REVISION); 95 l = iommu_read_reg(obj, MMU_REVISION);
124 dev_info(obj->dev, "%s: version %d.%d\n", obj->name, 96 dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
125 (l >> 4) & 0xf, l & 0xf); 97 (l >> 4) & 0xf, l & 0xf);
126 98
127 l = iommu_read_reg(obj, MMU_SYSCONFIG);
128 l &= ~MMU_SYS_IDLE_MASK;
129 l |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE);
130 iommu_write_reg(obj, l, MMU_SYSCONFIG);
131
132 iommu_write_reg(obj, pa, MMU_TTB); 99 iommu_write_reg(obj, pa, MMU_TTB);
133 100
134 __iommu_set_twl(obj, true); 101 __iommu_set_twl(obj, true);
@@ -142,7 +109,6 @@ static void omap2_iommu_disable(struct omap_iommu *obj)
142 109
143 l &= ~MMU_CNTL_MASK; 110 l &= ~MMU_CNTL_MASK;
144 iommu_write_reg(obj, l, MMU_CNTL); 111 iommu_write_reg(obj, l, MMU_CNTL);
145 iommu_write_reg(obj, MMU_SYS_IDLE_FORCE, MMU_SYSCONFIG);
146 112
147 dev_dbg(obj->dev, "%s is shutting down\n", obj->name); 113 dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
148} 114}
@@ -271,8 +237,6 @@ omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
271 char *p = buf; 237 char *p = buf;
272 238
273 pr_reg(REVISION); 239 pr_reg(REVISION);
274 pr_reg(SYSCONFIG);
275 pr_reg(SYSSTATUS);
276 pr_reg(IRQSTATUS); 240 pr_reg(IRQSTATUS);
277 pr_reg(IRQENABLE); 241 pr_reg(IRQENABLE);
278 pr_reg(WALKING_ST); 242 pr_reg(WALKING_ST);
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index c16e8fc8a4bd..4c9db62814ff 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -398,6 +398,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
398 do_gart_setup(gart, NULL); 398 do_gart_setup(gart, NULL);
399 399
400 gart_handle = gart; 400 gart_handle = gart;
401 bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
401 return 0; 402 return 0;
402 403
403fail: 404fail:
@@ -450,7 +451,6 @@ static struct platform_driver tegra_gart_driver = {
450 451
451static int __devinit tegra_gart_init(void) 452static int __devinit tegra_gart_init(void)
452{ 453{
453 bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
454 return platform_driver_register(&tegra_gart_driver); 454 return platform_driver_register(&tegra_gart_driver);
455} 455}
456 456
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 4252d743963d..25c1210c0832 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -694,10 +694,8 @@ static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
694 *pte = _PTE_VACANT(iova); 694 *pte = _PTE_VACANT(iova);
695 FLUSH_CPU_DCACHE(pte, page, sizeof(*pte)); 695 FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
696 flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0); 696 flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0);
697 if (!--(*count)) { 697 if (!--(*count))
698 free_ptbl(as, iova); 698 free_ptbl(as, iova);
699 smmu_flush_regs(as->smmu, 0);
700 }
701} 699}
702 700
703static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova, 701static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova,
@@ -1232,6 +1230,7 @@ static int tegra_smmu_probe(struct platform_device *pdev)
1232 1230
1233 smmu_debugfs_create(smmu); 1231 smmu_debugfs_create(smmu);
1234 smmu_handle = smmu; 1232 smmu_handle = smmu;
1233 bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
1235 return 0; 1234 return 0;
1236} 1235}
1237 1236
@@ -1276,7 +1275,6 @@ static struct platform_driver tegra_smmu_driver = {
1276 1275
1277static int __devinit tegra_smmu_init(void) 1276static int __devinit tegra_smmu_init(void)
1278{ 1277{
1279 bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
1280 return platform_driver_register(&tegra_smmu_driver); 1278 return platform_driver_register(&tegra_smmu_driver);
1281} 1279}
1282 1280
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
index 171ad8aedc83..fc0e34ce038f 100644
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -39,6 +39,8 @@ extern void debug_dma_map_page(struct device *dev, struct page *page,
39 int direction, dma_addr_t dma_addr, 39 int direction, dma_addr_t dma_addr,
40 bool map_single); 40 bool map_single);
41 41
42extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
43
42extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, 44extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
43 size_t size, int direction, bool map_single); 45 size_t size, int direction, bool map_single);
44 46
@@ -105,6 +107,11 @@ static inline void debug_dma_map_page(struct device *dev, struct page *page,
105{ 107{
106} 108}
107 109
110static inline void debug_dma_mapping_error(struct device *dev,
111 dma_addr_t dma_addr)
112{
113}
114
108static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, 115static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
109 size_t size, int direction, 116 size_t size, int direction,
110 bool map_single) 117 bool map_single)
diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h
index c677b9f2fefa..5b429c43a297 100644
--- a/include/linux/platform_data/iommu-omap.h
+++ b/include/linux/platform_data/iommu-omap.h
@@ -10,6 +10,8 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/platform_device.h>
14
13#define MMU_REG_SIZE 256 15#define MMU_REG_SIZE 256
14 16
15/** 17/**
@@ -42,8 +44,11 @@ struct omap_mmu_dev_attr {
42 44
43struct iommu_platform_data { 45struct iommu_platform_data {
44 const char *name; 46 const char *name;
45 const char *clk_name; 47 const char *reset_name;
46 const int nr_tlb_entries; 48 int nr_tlb_entries;
47 u32 da_start; 49 u32 da_start;
48 u32 da_end; 50 u32 da_end;
51
52 int (*assert_reset)(struct platform_device *pdev, const char *name);
53 int (*deassert_reset)(struct platform_device *pdev, const char *name);
49}; 54};
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index d84beb994f36..5e396accd3d0 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -45,6 +45,12 @@ enum {
45 dma_debug_coherent, 45 dma_debug_coherent,
46}; 46};
47 47
48enum map_err_types {
49 MAP_ERR_CHECK_NOT_APPLICABLE,
50 MAP_ERR_NOT_CHECKED,
51 MAP_ERR_CHECKED,
52};
53
48#define DMA_DEBUG_STACKTRACE_ENTRIES 5 54#define DMA_DEBUG_STACKTRACE_ENTRIES 5
49 55
50struct dma_debug_entry { 56struct dma_debug_entry {
@@ -57,6 +63,7 @@ struct dma_debug_entry {
57 int direction; 63 int direction;
58 int sg_call_ents; 64 int sg_call_ents;
59 int sg_mapped_ents; 65 int sg_mapped_ents;
66 enum map_err_types map_err_type;
60#ifdef CONFIG_STACKTRACE 67#ifdef CONFIG_STACKTRACE
61 struct stack_trace stacktrace; 68 struct stack_trace stacktrace;
62 unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES]; 69 unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
@@ -114,6 +121,12 @@ static struct device_driver *current_driver __read_mostly;
114 121
115static DEFINE_RWLOCK(driver_name_lock); 122static DEFINE_RWLOCK(driver_name_lock);
116 123
124static const char *const maperr2str[] = {
125 [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
126 [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
127 [MAP_ERR_CHECKED] = "dma map error checked",
128};
129
117static const char *type2name[4] = { "single", "page", 130static const char *type2name[4] = { "single", "page",
118 "scather-gather", "coherent" }; 131 "scather-gather", "coherent" };
119 132
@@ -376,11 +389,12 @@ void debug_dma_dump_mappings(struct device *dev)
376 list_for_each_entry(entry, &bucket->list, list) { 389 list_for_each_entry(entry, &bucket->list, list) {
377 if (!dev || dev == entry->dev) { 390 if (!dev || dev == entry->dev) {
378 dev_info(entry->dev, 391 dev_info(entry->dev,
379 "%s idx %d P=%Lx D=%Lx L=%Lx %s\n", 392 "%s idx %d P=%Lx D=%Lx L=%Lx %s %s\n",
380 type2name[entry->type], idx, 393 type2name[entry->type], idx,
381 (unsigned long long)entry->paddr, 394 (unsigned long long)entry->paddr,
382 entry->dev_addr, entry->size, 395 entry->dev_addr, entry->size,
383 dir2name[entry->direction]); 396 dir2name[entry->direction],
397 maperr2str[entry->map_err_type]);
384 } 398 }
385 } 399 }
386 400
@@ -844,16 +858,16 @@ static void check_unmap(struct dma_debug_entry *ref)
844 struct hash_bucket *bucket; 858 struct hash_bucket *bucket;
845 unsigned long flags; 859 unsigned long flags;
846 860
847 if (dma_mapping_error(ref->dev, ref->dev_addr)) {
848 err_printk(ref->dev, NULL, "DMA-API: device driver tries "
849 "to free an invalid DMA memory address\n");
850 return;
851 }
852
853 bucket = get_hash_bucket(ref, &flags); 861 bucket = get_hash_bucket(ref, &flags);
854 entry = bucket_find_exact(bucket, ref); 862 entry = bucket_find_exact(bucket, ref);
855 863
856 if (!entry) { 864 if (!entry) {
865 if (dma_mapping_error(ref->dev, ref->dev_addr)) {
866 err_printk(ref->dev, NULL,
867 "DMA-API: device driver tries "
868 "to free an invalid DMA memory address\n");
869 return;
870 }
857 err_printk(ref->dev, NULL, "DMA-API: device driver tries " 871 err_printk(ref->dev, NULL, "DMA-API: device driver tries "
858 "to free DMA memory it has not allocated " 872 "to free DMA memory it has not allocated "
859 "[device address=0x%016llx] [size=%llu bytes]\n", 873 "[device address=0x%016llx] [size=%llu bytes]\n",
@@ -910,6 +924,15 @@ static void check_unmap(struct dma_debug_entry *ref)
910 dir2name[ref->direction]); 924 dir2name[ref->direction]);
911 } 925 }
912 926
927 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
928 err_printk(ref->dev, entry,
929 "DMA-API: device driver failed to check map error"
930 "[device address=0x%016llx] [size=%llu bytes] "
931 "[mapped as %s]",
932 ref->dev_addr, ref->size,
933 type2name[entry->type]);
934 }
935
913 hash_bucket_del(entry); 936 hash_bucket_del(entry);
914 dma_entry_free(entry); 937 dma_entry_free(entry);
915 938
@@ -1017,7 +1040,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1017 if (unlikely(global_disable)) 1040 if (unlikely(global_disable))
1018 return; 1041 return;
1019 1042
1020 if (unlikely(dma_mapping_error(dev, dma_addr))) 1043 if (dma_mapping_error(dev, dma_addr))
1021 return; 1044 return;
1022 1045
1023 entry = dma_entry_alloc(); 1046 entry = dma_entry_alloc();
@@ -1030,6 +1053,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1030 entry->dev_addr = dma_addr; 1053 entry->dev_addr = dma_addr;
1031 entry->size = size; 1054 entry->size = size;
1032 entry->direction = direction; 1055 entry->direction = direction;
1056 entry->map_err_type = MAP_ERR_NOT_CHECKED;
1033 1057
1034 if (map_single) 1058 if (map_single)
1035 entry->type = dma_debug_single; 1059 entry->type = dma_debug_single;
@@ -1045,6 +1069,30 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1045} 1069}
1046EXPORT_SYMBOL(debug_dma_map_page); 1070EXPORT_SYMBOL(debug_dma_map_page);
1047 1071
1072void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1073{
1074 struct dma_debug_entry ref;
1075 struct dma_debug_entry *entry;
1076 struct hash_bucket *bucket;
1077 unsigned long flags;
1078
1079 if (unlikely(global_disable))
1080 return;
1081
1082 ref.dev = dev;
1083 ref.dev_addr = dma_addr;
1084 bucket = get_hash_bucket(&ref, &flags);
1085 entry = bucket_find_exact(bucket, &ref);
1086
1087 if (!entry)
1088 goto out;
1089
1090 entry->map_err_type = MAP_ERR_CHECKED;
1091out:
1092 put_hash_bucket(bucket, &flags);
1093}
1094EXPORT_SYMBOL(debug_dma_mapping_error);
1095
1048void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, 1096void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
1049 size_t size, int direction, bool map_single) 1097 size_t size, int direction, bool map_single)
1050{ 1098{
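
Finally, none of this fires unless the DMA API debugging facility is built in.
A sketch of the assumed workflow for exercising the new check (the entry count
is an arbitrary example):

	CONFIG_DMA_API_DEBUG=y        # build time: compile in lib/dma-debug.c
	dma_debug_entries=65536       # boot time: enlarge the preallocated pool
	dmesg | grep DMA-API          # look for "failed to check map error"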