 28 files changed, 869 insertions(+), 647 deletions(-)
diff --git a/arch/arm/mach-omap2/iommu2.c b/arch/arm/mach-omap2/iommu2.c
index f286012783c6..eefc37912ef3 100644
--- a/arch/arm/mach-omap2/iommu2.c
+++ b/arch/arm/mach-omap2/iommu2.c
@@ -66,7 +66,7 @@
 	 ((pgsz) == MMU_CAM_PGSZ_4K)  ? 0xfffff000 : 0)
 
 
-static void __iommu_set_twl(struct iommu *obj, bool on)
+static void __iommu_set_twl(struct omap_iommu *obj, bool on)
 {
 	u32 l = iommu_read_reg(obj, MMU_CNTL);
 
@@ -85,7 +85,7 @@ static void __iommu_set_twl(struct iommu *obj, bool on)
 }
 
 
-static int omap2_iommu_enable(struct iommu *obj)
+static int omap2_iommu_enable(struct omap_iommu *obj)
 {
 	u32 l, pa;
 	unsigned long timeout;
@@ -127,7 +127,7 @@ static int omap2_iommu_enable(struct iommu *obj)
 	return 0;
 }
 
-static void omap2_iommu_disable(struct iommu *obj)
+static void omap2_iommu_disable(struct omap_iommu *obj)
 {
 	u32 l = iommu_read_reg(obj, MMU_CNTL);
 
@@ -138,12 +138,12 @@ static void omap2_iommu_disable(struct iommu *obj)
 	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
 }
 
-static void omap2_iommu_set_twl(struct iommu *obj, bool on)
+static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on)
 {
 	__iommu_set_twl(obj, false);
 }
 
-static u32 omap2_iommu_fault_isr(struct iommu *obj, u32 *ra)
+static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra)
 {
 	u32 stat, da;
 	u32 errs = 0;
@@ -173,13 +173,13 @@ static u32 omap2_iommu_fault_isr(struct iommu *obj, u32 *ra)
 	return errs;
 }
 
-static void omap2_tlb_read_cr(struct iommu *obj, struct cr_regs *cr)
+static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
 {
 	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
 	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
 }
 
-static void omap2_tlb_load_cr(struct iommu *obj, struct cr_regs *cr)
+static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
 {
 	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
 	iommu_write_reg(obj, cr->ram, MMU_RAM);
@@ -193,7 +193,8 @@ static u32 omap2_cr_to_virt(struct cr_regs *cr)
 	return cr->cam & mask;
 }
 
-static struct cr_regs *omap2_alloc_cr(struct iommu *obj, struct iotlb_entry *e)
+static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj,
+				      struct iotlb_entry *e)
 {
 	struct cr_regs *cr;
 
@@ -230,7 +231,8 @@ static u32 omap2_get_pte_attr(struct iotlb_entry *e)
 	return attr;
 }
 
-static ssize_t omap2_dump_cr(struct iommu *obj, struct cr_regs *cr, char *buf)
+static ssize_t
+omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf)
 {
 	char *p = buf;
 
@@ -254,7 +256,8 @@ static ssize_t omap2_dump_cr(struct iommu *obj, struct cr_regs *cr, char *buf)
 		goto out;					\
 	} while (0)
 
-static ssize_t omap2_iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t len)
+static ssize_t
+omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
 {
 	char *p = buf;
 
@@ -280,7 +283,7 @@ out:
 	return p - buf;
 }
 
-static void omap2_iommu_save_ctx(struct iommu *obj)
+static void omap2_iommu_save_ctx(struct omap_iommu *obj)
 {
 	int i;
 	u32 *p = obj->ctx;
@@ -293,7 +296,7 @@ static void omap2_iommu_save_ctx(struct iommu *obj)
 	BUG_ON(p[0] != IOMMU_ARCH_VERSION);
 }
 
-static void omap2_iommu_restore_ctx(struct iommu *obj)
+static void omap2_iommu_restore_ctx(struct omap_iommu *obj)
 {
 	int i;
 	u32 *p = obj->ctx;
@@ -343,13 +346,13 @@ static const struct iommu_functions omap2_iommu_ops = {
 
 static int __init omap2_iommu_init(void)
 {
-	return install_iommu_arch(&omap2_iommu_ops);
+	return omap_install_iommu_arch(&omap2_iommu_ops);
 }
 module_init(omap2_iommu_init);
 
 static void __exit omap2_iommu_exit(void)
 {
-	uninstall_iommu_arch(&omap2_iommu_ops);
+	omap_uninstall_iommu_arch(&omap2_iommu_ops);
 }
 module_exit(omap2_iommu_exit);
 
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index 6f4edd3408c2..aa59f4247dc5 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -134,18 +134,6 @@ config OMAP_MBOX_KFIFO_SIZE
 	  This can also be changed at runtime (via the mbox_kfifo_size
 	  module parameter).
 
-config OMAP_IOMMU
-	tristate
-
-config OMAP_IOMMU_DEBUG
-	tristate "Export OMAP IOMMU internals in DebugFS"
-	depends on OMAP_IOMMU && DEBUG_FS
-	help
-	  Select this to see extensive information about
-	  the internal state of OMAP IOMMU in debugfs.
-
-	  Say N unless you know you need this.
-
 config OMAP_IOMMU_IVA2
 	bool
 
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index f0233e6abcdf..985262242f25 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -18,8 +18,6 @@ obj-$(CONFIG_ARCH_OMAP3) += omap_device.o
 obj-$(CONFIG_ARCH_OMAP4) += omap_device.o
 
 obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
-obj-$(CONFIG_OMAP_IOMMU) += iommu.o iovmm.o
-obj-$(CONFIG_OMAP_IOMMU_DEBUG) += iommu-debug.o
 
 obj-$(CONFIG_CPU_FREQ) += cpu-omap.o
 obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o
diff --git a/arch/arm/plat-omap/include/plat/iommu.h b/arch/arm/plat-omap/include/plat/iommu.h
index 174f1b9c8c03..a1d79ee19250 100644
--- a/arch/arm/plat-omap/include/plat/iommu.h
+++ b/arch/arm/plat-omap/include/plat/iommu.h
@@ -25,16 +25,17 @@ struct iotlb_entry {
 	};
 };
 
-struct iommu {
+struct omap_iommu {
 	const char	*name;
 	struct module	*owner;
 	struct clk	*clk;
 	void __iomem	*regbase;
 	struct device	*dev;
 	void		*isr_priv;
+	struct iommu_domain *domain;
 
 	unsigned int	refcount;
-	struct mutex	iommu_lock;	/* global for this whole object */
+	spinlock_t	iommu_lock;	/* global for this whole object */
 
 	/*
 	 * We don't change iopgd for a situation like pgd for a task,
@@ -48,8 +49,6 @@ struct iommu {
 	struct list_head	mmap;
 	struct mutex		mmap_lock; /* protect mmap */
 
-	int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs, void *priv);
-
 	void *ctx; /* iommu context: registres saved area */
 	u32 da_start;
 	u32 da_end;
@@ -81,25 +80,27 @@ struct iotlb_lock {
 struct iommu_functions {
 	unsigned long	version;
 
-	int (*enable)(struct iommu *obj);
-	void (*disable)(struct iommu *obj);
-	void (*set_twl)(struct iommu *obj, bool on);
-	u32 (*fault_isr)(struct iommu *obj, u32 *ra);
+	int (*enable)(struct omap_iommu *obj);
+	void (*disable)(struct omap_iommu *obj);
+	void (*set_twl)(struct omap_iommu *obj, bool on);
+	u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra);
 
-	void (*tlb_read_cr)(struct iommu *obj, struct cr_regs *cr);
-	void (*tlb_load_cr)(struct iommu *obj, struct cr_regs *cr);
+	void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr);
+	void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr);
 
-	struct cr_regs *(*alloc_cr)(struct iommu *obj, struct iotlb_entry *e);
+	struct cr_regs *(*alloc_cr)(struct omap_iommu *obj,
+						struct iotlb_entry *e);
 	int (*cr_valid)(struct cr_regs *cr);
 	u32 (*cr_to_virt)(struct cr_regs *cr);
 	void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e);
-	ssize_t (*dump_cr)(struct iommu *obj, struct cr_regs *cr, char *buf);
+	ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr,
+							char *buf);
 
 	u32 (*get_pte_attr)(struct iotlb_entry *e);
 
-	void (*save_ctx)(struct iommu *obj);
-	void (*restore_ctx)(struct iommu *obj);
-	ssize_t (*dump_ctx)(struct iommu *obj, char *buf, ssize_t len);
+	void (*save_ctx)(struct omap_iommu *obj);
+	void (*restore_ctx)(struct omap_iommu *obj);
+	ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len);
 };
 
 struct iommu_platform_data {
@@ -150,40 +151,31 @@ struct iommu_platform_data {
 /*
  * global functions
  */
-extern u32 iommu_arch_version(void);
-
-extern void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e);
-extern u32 iotlb_cr_to_virt(struct cr_regs *cr);
-
-extern int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e);
-extern void iommu_set_twl(struct iommu *obj, bool on);
-extern void flush_iotlb_page(struct iommu *obj, u32 da);
-extern void flush_iotlb_range(struct iommu *obj, u32 start, u32 end);
-extern void flush_iotlb_all(struct iommu *obj);
-
-extern int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e);
-extern void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd,
-				   u32 **ppte);
-extern size_t iopgtable_clear_entry(struct iommu *obj, u32 iova);
-
-extern int iommu_set_da_range(struct iommu *obj, u32 start, u32 end);
-extern struct iommu *iommu_get(const char *name);
-extern void iommu_put(struct iommu *obj);
-extern int iommu_set_isr(const char *name,
-		int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
+extern u32 omap_iommu_arch_version(void);
+
+extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e);
+
+extern int
+omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e);
+
+extern int omap_iommu_set_isr(const char *name,
+		int (*isr)(struct omap_iommu *obj, u32 da, u32 iommu_errs,
 				void *priv),
 			 void *isr_priv);
 
-extern void iommu_save_ctx(struct iommu *obj);
-extern void iommu_restore_ctx(struct iommu *obj);
+extern void omap_iommu_save_ctx(struct omap_iommu *obj);
+extern void omap_iommu_restore_ctx(struct omap_iommu *obj);
 
-extern int install_iommu_arch(const struct iommu_functions *ops);
-extern void uninstall_iommu_arch(const struct iommu_functions *ops);
+extern int omap_install_iommu_arch(const struct iommu_functions *ops);
+extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops);
 
-extern int foreach_iommu_device(void *data,
+extern int omap_foreach_iommu_device(void *data,
 				int (*fn)(struct device *, void *));
 
-extern ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t len);
-extern size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t len);
+extern ssize_t
+omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
+extern size_t
+omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
+struct device *omap_find_iommu_device(const char *name);
 
 #endif /* __MACH_IOMMU_H */
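
The relocated header drops the per-object isr member and keeps only the by-name hook omap_iommu_set_isr(). For reference, a minimal sketch of a client registering a fault ISR against the new prototype; the iommu name "isp", the handler body, and the assumed unhandled-fault return convention are illustrative, not taken from this patch:

	/* hypothetical client of omap_iommu_set_isr(); "isp" is an assumed name */
	static int example_iommu_isr(struct omap_iommu *obj, u32 da,
				     u32 iommu_errs, void *priv)
	{
		dev_err(obj->dev, "iommu fault: da 0x%08x errs 0x%08x\n",
			da, iommu_errs);
		return -EIO;	/* assumption: nonzero means the fault was not handled */
	}

	/* in the client's probe path: */
	err = omap_iommu_set_isr("isp", example_iommu_isr, NULL);
	if (err)
		dev_err(dev, "could not register iommu fault isr\n");
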
diff --git a/arch/arm/plat-omap/include/plat/iommu2.h b/arch/arm/plat-omap/include/plat/iommu2.h
index 10ad05f410e9..d4116b595e40 100644
--- a/arch/arm/plat-omap/include/plat/iommu2.h
+++ b/arch/arm/plat-omap/include/plat/iommu2.h
@@ -83,12 +83,12 @@
 /*
  * register accessors
  */
-static inline u32 iommu_read_reg(struct iommu *obj, size_t offs)
+static inline u32 iommu_read_reg(struct omap_iommu *obj, size_t offs)
 {
 	return __raw_readl(obj->regbase + offs);
 }
 
-static inline void iommu_write_reg(struct iommu *obj, u32 val, size_t offs)
+static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
 {
 	__raw_writel(val, obj->regbase + offs);
 }
diff --git a/arch/arm/plat-omap/iopgtable.h b/arch/arm/plat-omap/include/plat/iopgtable.h
index c3e93bb0911f..66a813977d52 100644
--- a/arch/arm/plat-omap/iopgtable.h
+++ b/arch/arm/plat-omap/include/plat/iopgtable.h
@@ -56,6 +56,19 @@
 
 #define IOPAGE_MASK	IOPTE_MASK
 
+/**
+ * omap_iommu_translate() - va to pa translation
+ * @d:		omap iommu descriptor
+ * @va:		virtual address
+ * @mask:	omap iommu descriptor mask
+ *
+ * va to pa translation
+ */
+static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
+{
+	return (d & mask) | (va & (~mask));
+}
+
 /*
  * some descriptor attributes.
  */
@@ -64,10 +77,15 @@
 #define IOPGD_SUPER		(1 << 18 | 2 << 0)
 
 #define iopgd_is_table(x)	(((x) & 3) == IOPGD_TABLE)
+#define iopgd_is_section(x)	(((x) & (1 << 18 | 3)) == IOPGD_SECTION)
+#define iopgd_is_super(x)	(((x) & (1 << 18 | 3)) == IOPGD_SUPER)
 
 #define IOPTE_SMALL	(2 << 0)
 #define IOPTE_LARGE	(1 << 0)
 
+#define iopte_is_small(x)	(((x) & 2) == IOPTE_SMALL)
+#define iopte_is_large(x)	(((x) & 3) == IOPTE_LARGE)
+
 /* to find an entry in a page-table-directory */
 #define iopgd_index(da)		(((da) >> IOPGD_SHIFT) & (PTRS_PER_IOPGD - 1))
 #define iopgd_offset(obj, da)	((obj)->iopgd + iopgd_index(da))
@@ -97,6 +115,6 @@ static inline u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa,
 }
 
 #define to_iommu(dev)							\
-	(struct iommu *)platform_get_drvdata(to_platform_device(dev))
+	(struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))
 
 #endif /* __PLAT_OMAP_IOMMU_H */
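
The new omap_iommu_translate() helper is pure bit arithmetic: descriptor bits covered by the mask supply the physical frame, virtual-address bits below it supply the offset. For reference, a standalone sketch with made-up values; the 1MB section mask and both addresses are illustrative:

	#include <stdio.h>

	/* userspace mirror of the helper added above */
	static unsigned long translate(unsigned long d, unsigned long va,
				       unsigned long mask)
	{
		return (d & mask) | (va & (~mask));
	}

	int main(void)
	{
		unsigned long d    = 0x80100002;	/* hypothetical 1MB section descriptor */
		unsigned long va   = 0x40123456;	/* device virtual address */
		unsigned long mask = 0xfff00000;	/* 1MB section mask */

		/* frame 0x80100000 from the descriptor + offset 0x23456 from va */
		printf("pa = 0x%08lx\n", translate(d, va, mask));	/* 0x80123456 */
		return 0;
	}
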
diff --git a/arch/arm/plat-omap/include/plat/iovmm.h b/arch/arm/plat-omap/include/plat/iovmm.h
index e992b9655fbc..6af1a91c0f36 100644
--- a/arch/arm/plat-omap/include/plat/iovmm.h
+++ b/arch/arm/plat-omap/include/plat/iovmm.h
@@ -13,8 +13,10 @@
 #ifndef __IOMMU_MMAP_H
 #define __IOMMU_MMAP_H
 
+#include <linux/iommu.h>
+
 struct iovm_struct {
-	struct iommu		*iommu;	/* iommu object which this belongs to */
+	struct omap_iommu	*iommu;	/* iommu object which this belongs to */
 	u32			da_start; /* area definition */
 	u32			da_end;
 	u32			flags; /* IOVMF_: see below */
@@ -70,20 +72,18 @@ struct iovm_struct {
 #define IOVMF_DA_FIXED		(1 << (4 + IOVMF_SW_SHIFT))
 
 
-extern struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da);
-extern u32 iommu_vmap(struct iommu *obj, u32 da,
+extern struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da);
+extern u32
+omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
 			const struct sg_table *sgt, u32 flags);
-extern struct sg_table *iommu_vunmap(struct iommu *obj, u32 da);
-extern u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes,
-			 u32 flags);
-extern void iommu_vfree(struct iommu *obj, const u32 da);
-extern u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
-		      u32 flags);
-extern void iommu_kunmap(struct iommu *obj, u32 da);
-extern u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes,
-			 u32 flags);
-extern void iommu_kfree(struct iommu *obj, u32 da);
-
-extern void *da_to_va(struct iommu *obj, u32 da);
+extern struct sg_table *omap_iommu_vunmap(struct iommu_domain *domain,
+				struct omap_iommu *obj, u32 da);
+extern u32
+omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj,
+				u32 da, size_t bytes, u32 flags);
+extern void
+omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
+				const u32 da);
+extern void *omap_da_to_va(struct omap_iommu *obj, u32 da);
 
 #endif /* __IOMMU_MMAP_H */
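
Every surviving iovmm entry point now takes the generic iommu_domain alongside the omap_iommu handle, so clients must thread the domain through each call. A hedged before/after sketch of one call site; 'domain', 'obj', 'sgt', and the fixed device address are illustrative:

	u32 da;

	/* before: the omap-specific object was the only handle */
	da = iommu_vmap(obj, 0x20000000, sgt, IOVMF_DA_FIXED);

	/* after: the iommu_domain (e.g. from iommu_domain_alloc()) comes first */
	da = omap_iommu_vmap(domain, obj, 0x20000000, sgt, IOVMF_DA_FIXED);
	if (IS_ERR_VALUE(da))
		return da;
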
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 8213efe1998c..43f4c92816ef 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -33,6 +33,7 @@
 #include <linux/uaccess.h>
 #include <linux/iommu.h>
 #include <linux/intel-iommu.h>
+#include <linux/pci.h>
 
 #include <asm/pgtable.h>
 #include <asm/gcc_intrin.h>
@@ -204,7 +205,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
 	case KVM_CAP_IOMMU:
-		r = iommu_found();
+		r = iommu_present(&pci_bus_type);
 		break;
 	default:
 		r = 0;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cf269096eadf..c38efd7b792e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -44,6 +44,7 @@
 #include <linux/perf_event.h>
 #include <linux/uaccess.h>
 #include <linux/hash.h>
+#include <linux/pci.h>
 #include <trace/events/kvm.h>
 
 #define CREATE_TRACE_POINTS
@@ -2123,7 +2124,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 		r = 0;
 		break;
 	case KVM_CAP_IOMMU:
-		r = iommu_found();
+		r = iommu_present(&pci_bus_type);
 		break;
 	case KVM_CAP_MCE:
 		r = KVM_MAX_MCE_BANKS;
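
Both KVM call sites change the same way: instead of asking whether any IOMMU driver registered globally, the capability check asks whether the bus carrying the assignable devices has iommu_ops. The same pattern applies to any client; a sketch, where 'pdev' is an assumed PCI device:

	/* presence is now a per-bus property, checked before domain setup */
	if (!iommu_present(&pci_bus_type)) {
		dev_info(&pdev->dev, "no IOMMU on the PCI bus\n");
		return -ENODEV;
	}
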
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 7d7eaa15e773..5414253b185a 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -112,4 +112,23 @@ config IRQ_REMAP
 	  To use x2apic mode in the CPU's which support x2APIC enhancements or
 	  to support platforms with CPU's having > 8 bit APIC ID, say Y.
 
+# OMAP IOMMU support
+config OMAP_IOMMU
+	bool "OMAP IOMMU Support"
+	depends on ARCH_OMAP
+	select IOMMU_API
+
+config OMAP_IOVMM
+	tristate "OMAP IO Virtual Memory Manager Support"
+	depends on OMAP_IOMMU
+
+config OMAP_IOMMU_DEBUG
+	tristate "Export OMAP IOMMU/IOVMM internals in DebugFS"
+	depends on OMAP_IOVMM && DEBUG_FS
+	help
+	  Select this to see extensive information about
+	  the internal state of OMAP IOMMU/IOVMM in debugfs.
+
+	  Say N unless you know you need this.
+
 endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 6394994a2b9d..2f4448794bc7 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -4,3 +4,6 @@ obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
 obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o
+obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
+obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
+obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 0e4227f457af..4ee277a8521a 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1283,7 +1283,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
 		if (!pte || !IOMMU_PTE_PRESENT(*pte))
 			continue;
 
-		dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
+		dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
 	}
 
 	update_domain(&dma_dom->domain);
@@ -2495,7 +2495,7 @@ static unsigned device_dma_ops_init(void)
 
 void __init amd_iommu_init_api(void)
 {
-	register_iommu(&amd_iommu_ops);
+	bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
 }
 
 int __init amd_iommu_init_dma_ops(void)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index be1953c239b0..bb161d2fa03c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3642,7 +3642,7 @@ int __init intel_iommu_init(void)
 
 	init_iommu_pm_ops();
 
-	register_iommu(&intel_iommu_ops);
+	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
 
 	bus_register_notifier(&pci_bus_type, &device_nb);
 
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 6e6b6a11b3ce..2fb2963df553 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -16,6 +16,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
+#include <linux/device.h>
+#include <linux/kernel.h>
 #include <linux/bug.h>
 #include <linux/types.h>
 #include <linux/module.h>
@@ -23,32 +25,78 @@
 #include <linux/errno.h>
 #include <linux/iommu.h>
 
-static struct iommu_ops *iommu_ops;
+static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
+{
+}
 
-void register_iommu(struct iommu_ops *ops)
+/**
+ * bus_set_iommu - set iommu-callbacks for the bus
+ * @bus: bus.
+ * @ops: the callbacks provided by the iommu-driver
+ *
+ * This function is called by an iommu driver to set the iommu methods
+ * used for a particular bus. Drivers for devices on that bus can use
+ * the iommu-api after these ops are registered.
+ * This special function is needed because IOMMUs are usually devices on
+ * the bus itself, so the iommu drivers are not initialized when the bus
+ * is set up. With this function the iommu-driver can set the iommu-ops
+ * afterwards.
+ */
+int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
 {
-	if (iommu_ops)
-		BUG();
+	if (bus->iommu_ops != NULL)
+		return -EBUSY;
+
+	bus->iommu_ops = ops;
+
+	/* Do IOMMU specific setup for this bus-type */
+	iommu_bus_init(bus, ops);
 
-	iommu_ops = ops;
+	return 0;
 }
+EXPORT_SYMBOL_GPL(bus_set_iommu);
 
-bool iommu_found(void)
+bool iommu_present(struct bus_type *bus)
 {
-	return iommu_ops != NULL;
+	return bus->iommu_ops != NULL;
 }
-EXPORT_SYMBOL_GPL(iommu_found);
+EXPORT_SYMBOL_GPL(iommu_present);
 
-struct iommu_domain *iommu_domain_alloc(void)
+/**
+ * iommu_set_fault_handler() - set a fault handler for an iommu domain
+ * @domain: iommu domain
+ * @handler: fault handler
+ *
+ * This function should be used by IOMMU users which want to be notified
+ * whenever an IOMMU fault happens.
+ *
+ * The fault handler itself should return 0 on success, and an appropriate
+ * error code otherwise.
+ */
+void iommu_set_fault_handler(struct iommu_domain *domain,
+					iommu_fault_handler_t handler)
+{
+	BUG_ON(!domain);
+
+	domain->handler = handler;
+}
+EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
+
+struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
 {
 	struct iommu_domain *domain;
 	int ret;
 
+	if (bus == NULL || bus->iommu_ops == NULL)
+		return NULL;
+
 	domain = kmalloc(sizeof(*domain), GFP_KERNEL);
 	if (!domain)
 		return NULL;
 
-	ret = iommu_ops->domain_init(domain);
+	domain->ops = bus->iommu_ops;
+
+	ret = domain->ops->domain_init(domain);
 	if (ret)
 		goto out_free;
 
@@ -63,62 +111,78 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc);
 
 void iommu_domain_free(struct iommu_domain *domain)
 {
-	iommu_ops->domain_destroy(domain);
+	if (likely(domain->ops->domain_destroy != NULL))
+		domain->ops->domain_destroy(domain);
+
 	kfree(domain);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_free);
 
 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
 {
-	return iommu_ops->attach_dev(domain, dev);
+	if (unlikely(domain->ops->attach_dev == NULL))
+		return -ENODEV;
+
+	return domain->ops->attach_dev(domain, dev);
 }
 EXPORT_SYMBOL_GPL(iommu_attach_device);
 
 void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
 {
-	iommu_ops->detach_dev(domain, dev);
+	if (unlikely(domain->ops->detach_dev == NULL))
+		return;
+
+	domain->ops->detach_dev(domain, dev);
 }
 EXPORT_SYMBOL_GPL(iommu_detach_device);
 
 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 			       unsigned long iova)
 {
-	return iommu_ops->iova_to_phys(domain, iova);
+	if (unlikely(domain->ops->iova_to_phys == NULL))
+		return 0;
+
+	return domain->ops->iova_to_phys(domain, iova);
 }
 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
 
 int iommu_domain_has_cap(struct iommu_domain *domain,
 			 unsigned long cap)
 {
-	return iommu_ops->domain_has_cap(domain, cap);
+	if (unlikely(domain->ops->domain_has_cap == NULL))
+		return 0;
+
+	return domain->ops->domain_has_cap(domain, cap);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	      phys_addr_t paddr, int gfp_order, int prot)
 {
-	unsigned long invalid_mask;
 	size_t size;
 
-	size         = 0x1000UL << gfp_order;
-	invalid_mask = size - 1;
+	if (unlikely(domain->ops->map == NULL))
+		return -ENODEV;
 
-	BUG_ON((iova | paddr) & invalid_mask);
+	size = PAGE_SIZE << gfp_order;
 
-	return iommu_ops->map(domain, iova, paddr, gfp_order, prot);
+	BUG_ON(!IS_ALIGNED(iova | paddr, size));
+
+	return domain->ops->map(domain, iova, paddr, gfp_order, prot);
 }
 EXPORT_SYMBOL_GPL(iommu_map);
 
 int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
 {
-	unsigned long invalid_mask;
 	size_t size;
 
-	size         = 0x1000UL << gfp_order;
-	invalid_mask = size - 1;
+	if (unlikely(domain->ops->unmap == NULL))
+		return -ENODEV;
+
+	size = PAGE_SIZE << gfp_order;
 
-	BUG_ON(iova & invalid_mask);
+	BUG_ON(!IS_ALIGNED(iova, size));
 
-	return iommu_ops->unmap(domain, iova, gfp_order);
+	return domain->ops->unmap(domain, iova, gfp_order);
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
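
With ops hanging off the bus, a complete client sequence becomes: check presence, allocate a domain for that bus, attach, map. A hedged sketch under those assumptions; the device, addresses, and error paths are illustrative, and the alignment follows the BUG_ON() above (iova and paddr must be aligned to PAGE_SIZE << gfp_order):

	#include <linux/iommu.h>
	#include <linux/pci.h>

	static int example_setup(struct pci_dev *pdev)
	{
		struct iommu_domain *domain;
		int ret;

		if (!iommu_present(&pci_bus_type))
			return -ENODEV;

		/* ops now come from the bus, not from a global pointer */
		domain = iommu_domain_alloc(&pci_bus_type);
		if (!domain)
			return -ENOMEM;

		ret = iommu_attach_device(domain, &pdev->dev);
		if (ret)
			goto out_free;

		/* map one page: gfp_order 0 == PAGE_SIZE; both addresses are
		 * aligned to the mapping size, as iommu_map() now checks */
		ret = iommu_map(domain, 0x100000, 0x80100000, 0,
				IOMMU_READ | IOMMU_WRITE);
		if (ret)
			goto out_detach;

		return 0;

	out_detach:
		iommu_detach_device(domain, &pdev->dev);
	out_free:
		iommu_domain_free(domain);
		return ret;
	}
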
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 1a584e077c61..5865dd2e28f9 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -543,6 +543,13 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 	}
 
 	ret = __flush_iotlb(domain);
+
+	/*
+	 * the IOMMU API requires us to return the order of the unmapped
+	 * page (on success).
+	 */
+	if (!ret)
+		ret = order;
 fail:
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
 	return ret;
@@ -721,7 +728,7 @@ static void __init setup_iommu_tex_classes(void)
 static int __init msm_iommu_init(void)
 {
 	setup_iommu_tex_classes();
-	register_iommu(&msm_iommu_ops);
+	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
 	return 0;
 }
 
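
The msm hunk above implements the unmap contract spelled out in its new comment: on success the driver returns the order of the unmapped page rather than 0. A sketch of what a caller can therefore rely on; the negative-error convention and the variable names are assumptions for illustration:

	int order;

	order = iommu_unmap(domain, iova, 0);	/* request one page */
	if (order < 0)
		return order;			/* error from the driver */

	/* on success, PAGE_SIZE << order bytes were unmapped */
	unmapped = PAGE_SIZE << order;
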
diff --git a/arch/arm/plat-omap/iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index f07cf2f08e09..9c192e79f806 100644
--- a/arch/arm/plat-omap/iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -21,7 +21,7 @@
 #include <plat/iommu.h>
 #include <plat/iovmm.h>
 
-#include "iopgtable.h"
+#include <plat/iopgtable.h>
 
 #define MAXCOLUMN 100	/* for short messages */
 
@@ -32,7 +32,7 @@ static struct dentry *iommu_debug_root;
 static ssize_t debug_read_ver(struct file *file, char __user *userbuf,
 			      size_t count, loff_t *ppos)
 {
-	u32 ver = iommu_arch_version();
+	u32 ver = omap_iommu_arch_version();
 	char buf[MAXCOLUMN], *p = buf;
 
 	p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf);
@@ -43,7 +43,7 @@ static ssize_t debug_read_ver(struct file *file, char __user *userbuf,
 static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
 			       size_t count, loff_t *ppos)
 {
-	struct iommu *obj = file->private_data;
+	struct omap_iommu *obj = file->private_data;
 	char *p, *buf;
 	ssize_t bytes;
 
@@ -54,7 +54,7 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
 
 	mutex_lock(&iommu_debug_lock);
 
-	bytes = iommu_dump_ctx(obj, p, count);
+	bytes = omap_iommu_dump_ctx(obj, p, count);
 	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);
 
 	mutex_unlock(&iommu_debug_lock);
@@ -66,7 +66,7 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
 static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
 			      size_t count, loff_t *ppos)
 {
-	struct iommu *obj = file->private_data;
+	struct omap_iommu *obj = file->private_data;
 	char *p, *buf;
 	ssize_t bytes, rest;
 
@@ -80,7 +80,7 @@ static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
 	p += sprintf(p, "%8s %8s\n", "cam:", "ram:");
 	p += sprintf(p, "-----------------------------------------\n");
 	rest = count - (p - buf);
-	p += dump_tlb_entries(obj, p, rest);
+	p += omap_dump_tlb_entries(obj, p, rest);
 
 	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
 
@@ -96,7 +96,7 @@ static ssize_t debug_write_pagetable(struct file *file,
 	struct iotlb_entry e;
 	struct cr_regs cr;
 	int err;
-	struct iommu *obj = file->private_data;
+	struct omap_iommu *obj = file->private_data;
 	char buf[MAXCOLUMN], *p = buf;
 
 	count = min(count, sizeof(buf));
@@ -113,8 +113,8 @@ static ssize_t debug_write_pagetable(struct file *file,
 		return -EINVAL;
 	}
 
-	iotlb_cr_to_e(&cr, &e);
-	err = iopgtable_store_entry(obj, &e);
+	omap_iotlb_cr_to_e(&cr, &e);
+	err = omap_iopgtable_store_entry(obj, &e);
 	if (err)
 		dev_err(obj->dev, "%s: fail to store cr\n", __func__);
 
@@ -136,7 +136,7 @@ static ssize_t debug_write_pagetable(struct file *file,
 		__err;						\
 	})
 
-static ssize_t dump_ioptable(struct iommu *obj, char *buf, ssize_t len)
+static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len)
 {
 	int i;
 	u32 *iopgd;
@@ -183,7 +183,7 @@ out:
 static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
 				    size_t count, loff_t *ppos)
 {
-	struct iommu *obj = file->private_data;
+	struct omap_iommu *obj = file->private_data;
 	char *p, *buf;
 	size_t bytes;
 
@@ -211,7 +211,7 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
 static ssize_t debug_read_mmap(struct file *file, char __user *userbuf,
 			       size_t count, loff_t *ppos)
 {
-	struct iommu *obj = file->private_data;
+	struct omap_iommu *obj = file->private_data;
 	char *p, *buf;
 	struct iovm_struct *tmp;
 	int uninitialized_var(i);
@@ -253,7 +253,7 @@ static ssize_t debug_read_mmap(struct file *file, char __user *userbuf,
 static ssize_t debug_read_mem(struct file *file, char __user *userbuf,
 			      size_t count, loff_t *ppos)
 {
-	struct iommu *obj = file->private_data;
+	struct omap_iommu *obj = file->private_data;
 	char *p, *buf;
 	struct iovm_struct *area;
 	ssize_t bytes;
@@ -267,7 +267,7 @@ static ssize_t debug_read_mem(struct file *file, char __user *userbuf,
 
 	mutex_lock(&iommu_debug_lock);
 
-	area = find_iovm_area(obj, (u32)ppos);
+	area = omap_find_iovm_area(obj, (u32)ppos);
 	if (IS_ERR(area)) {
 		bytes = -EINVAL;
 		goto err_out;
@@ -286,7 +286,7 @@ err_out:
 static ssize_t debug_write_mem(struct file *file, const char __user *userbuf,
 			       size_t count, loff_t *ppos)
 {
-	struct iommu *obj = file->private_data;
+	struct omap_iommu *obj = file->private_data;
 	struct iovm_struct *area;
 	char *p, *buf;
 
@@ -304,7 +304,7 @@ static ssize_t debug_write_mem(struct file *file, const char __user *userbuf,
 		goto err_out;
 	}
 
-	area = find_iovm_area(obj, (u32)ppos);
+	area = omap_find_iovm_area(obj, (u32)ppos);
 	if (IS_ERR(area)) {
 		count = -EINVAL;
 		goto err_out;
@@ -360,7 +360,7 @@ DEBUG_FOPS(mem);
 static int iommu_debug_register(struct device *dev, void *data)
 {
 	struct platform_device *pdev = to_platform_device(dev);
-	struct iommu *obj = platform_get_drvdata(pdev);
+	struct omap_iommu *obj = platform_get_drvdata(pdev);
 	struct dentry *d, *parent;
 
 	if (!obj || !obj->dev)
@@ -396,7 +396,7 @@ static int __init iommu_debug_init(void)
 		return -ENOMEM;
 	iommu_debug_root = d;
 
-	err = foreach_iommu_device(d, iommu_debug_register);
+	err = omap_foreach_iommu_device(d, iommu_debug_register);
 	if (err)
 		goto err_out;
 	return 0;
diff --git a/arch/arm/plat-omap/iommu.c b/drivers/iommu/omap-iommu.c index 34fc31ee9081..8f32b2bf7587 100644 --- a/arch/arm/plat-omap/iommu.c +++ b/drivers/iommu/omap-iommu.c | |||
@@ -18,18 +18,34 @@ | |||
18 | #include <linux/ioport.h> | 18 | #include <linux/ioport.h> |
19 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/iommu.h> | ||
22 | #include <linux/mutex.h> | ||
23 | #include <linux/spinlock.h> | ||
21 | 24 | ||
22 | #include <asm/cacheflush.h> | 25 | #include <asm/cacheflush.h> |
23 | 26 | ||
24 | #include <plat/iommu.h> | 27 | #include <plat/iommu.h> |
25 | 28 | ||
26 | #include "iopgtable.h" | 29 | #include <plat/iopgtable.h> |
27 | 30 | ||
28 | #define for_each_iotlb_cr(obj, n, __i, cr) \ | 31 | #define for_each_iotlb_cr(obj, n, __i, cr) \ |
29 | for (__i = 0; \ | 32 | for (__i = 0; \ |
30 | (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \ | 33 | (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \ |
31 | __i++) | 34 | __i++) |
32 | 35 | ||
36 | /** | ||
37 | * struct omap_iommu_domain - omap iommu domain | ||
38 | * @pgtable: the page table | ||
39 | * @iommu_dev: an omap iommu device attached to this domain. only a single | ||
40 | * iommu device can be attached for now. | ||
41 | * @lock: domain lock, should be taken when attaching/detaching | ||
42 | */ | ||
43 | struct omap_iommu_domain { | ||
44 | u32 *pgtable; | ||
45 | struct omap_iommu *iommu_dev; | ||
46 | spinlock_t lock; | ||
47 | }; | ||
48 | |||
33 | /* accommodate the difference between omap1 and omap2/3 */ | 49 | /* accommodate the difference between omap1 and omap2/3 */ |
34 | static const struct iommu_functions *arch_iommu; | 50 | static const struct iommu_functions *arch_iommu; |
35 | 51 | ||
@@ -37,13 +53,13 @@ static struct platform_driver omap_iommu_driver; | |||
37 | static struct kmem_cache *iopte_cachep; | 53 | static struct kmem_cache *iopte_cachep; |
38 | 54 | ||
39 | /** | 55 | /** |
40 | * install_iommu_arch - Install archtecure specific iommu functions | 56 | * omap_install_iommu_arch - Install archtecure specific iommu functions |
41 | * @ops: a pointer to architecture specific iommu functions | 57 | * @ops: a pointer to architecture specific iommu functions |
42 | * | 58 | * |
43 | * There are several kind of iommu algorithm(tlb, pagetable) among | 59 | * There are several kind of iommu algorithm(tlb, pagetable) among |
44 | * omap series. This interface installs such an iommu algorighm. | 60 | * omap series. This interface installs such an iommu algorighm. |
45 | **/ | 61 | **/ |
46 | int install_iommu_arch(const struct iommu_functions *ops) | 62 | int omap_install_iommu_arch(const struct iommu_functions *ops) |
47 | { | 63 | { |
48 | if (arch_iommu) | 64 | if (arch_iommu) |
49 | return -EBUSY; | 65 | return -EBUSY; |
@@ -51,53 +67,53 @@ int install_iommu_arch(const struct iommu_functions *ops) | |||
51 | arch_iommu = ops; | 67 | arch_iommu = ops; |
52 | return 0; | 68 | return 0; |
53 | } | 69 | } |
54 | EXPORT_SYMBOL_GPL(install_iommu_arch); | 70 | EXPORT_SYMBOL_GPL(omap_install_iommu_arch); |
55 | 71 | ||
56 | /** | 72 | /** |
57 | * uninstall_iommu_arch - Uninstall archtecure specific iommu functions | 73 | * omap_uninstall_iommu_arch - Uninstall archtecure specific iommu functions |
58 | * @ops: a pointer to architecture specific iommu functions | 74 | * @ops: a pointer to architecture specific iommu functions |
59 | * | 75 | * |
60 | * This interface uninstalls the iommu algorighm installed previously. | 76 | * This interface uninstalls the iommu algorighm installed previously. |
61 | **/ | 77 | **/ |
62 | void uninstall_iommu_arch(const struct iommu_functions *ops) | 78 | void omap_uninstall_iommu_arch(const struct iommu_functions *ops) |
63 | { | 79 | { |
64 | if (arch_iommu != ops) | 80 | if (arch_iommu != ops) |
65 | pr_err("%s: not your arch\n", __func__); | 81 | pr_err("%s: not your arch\n", __func__); |
66 | 82 | ||
67 | arch_iommu = NULL; | 83 | arch_iommu = NULL; |
68 | } | 84 | } |
69 | EXPORT_SYMBOL_GPL(uninstall_iommu_arch); | 85 | EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch); |
70 | 86 | ||
71 | /** | 87 | /** |
72 | * iommu_save_ctx - Save registers for pm off-mode support | 88 | * omap_iommu_save_ctx - Save registers for pm off-mode support |
73 | * @obj: target iommu | 89 | * @obj: target iommu |
74 | **/ | 90 | **/ |
75 | void iommu_save_ctx(struct iommu *obj) | 91 | void omap_iommu_save_ctx(struct omap_iommu *obj) |
76 | { | 92 | { |
77 | arch_iommu->save_ctx(obj); | 93 | arch_iommu->save_ctx(obj); |
78 | } | 94 | } |
79 | EXPORT_SYMBOL_GPL(iommu_save_ctx); | 95 | EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); |
80 | 96 | ||
81 | /** | 97 | /** |
82 | * iommu_restore_ctx - Restore registers for pm off-mode support | 98 | * omap_iommu_restore_ctx - Restore registers for pm off-mode support |
83 | * @obj: target iommu | 99 | * @obj: target iommu |
84 | **/ | 100 | **/ |
85 | void iommu_restore_ctx(struct iommu *obj) | 101 | void omap_iommu_restore_ctx(struct omap_iommu *obj) |
86 | { | 102 | { |
87 | arch_iommu->restore_ctx(obj); | 103 | arch_iommu->restore_ctx(obj); |
88 | } | 104 | } |
89 | EXPORT_SYMBOL_GPL(iommu_restore_ctx); | 105 | EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx); |
90 | 106 | ||
91 | /** | 107 | /** |
92 | * iommu_arch_version - Return running iommu arch version | 108 | * omap_iommu_arch_version - Return running iommu arch version |
93 | **/ | 109 | **/ |
94 | u32 iommu_arch_version(void) | 110 | u32 omap_iommu_arch_version(void) |
95 | { | 111 | { |
96 | return arch_iommu->version; | 112 | return arch_iommu->version; |
97 | } | 113 | } |
98 | EXPORT_SYMBOL_GPL(iommu_arch_version); | 114 | EXPORT_SYMBOL_GPL(omap_iommu_arch_version); |
99 | 115 | ||
100 | static int iommu_enable(struct iommu *obj) | 116 | static int iommu_enable(struct omap_iommu *obj) |
101 | { | 117 | { |
102 | int err; | 118 | int err; |
103 | 119 | ||
@@ -115,7 +131,7 @@ static int iommu_enable(struct iommu *obj) | |||
115 | return err; | 131 | return err; |
116 | } | 132 | } |
117 | 133 | ||
118 | static void iommu_disable(struct iommu *obj) | 134 | static void iommu_disable(struct omap_iommu *obj) |
119 | { | 135 | { |
120 | if (!obj) | 136 | if (!obj) |
121 | return; | 137 | return; |
@@ -130,13 +146,13 @@ static void iommu_disable(struct iommu *obj) | |||
130 | /* | 146 | /* |
131 | * TLB operations | 147 | * TLB operations |
132 | */ | 148 | */ |
133 | void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) | 149 | void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) |
134 | { | 150 | { |
135 | BUG_ON(!cr || !e); | 151 | BUG_ON(!cr || !e); |
136 | 152 | ||
137 | arch_iommu->cr_to_e(cr, e); | 153 | arch_iommu->cr_to_e(cr, e); |
138 | } | 154 | } |
139 | EXPORT_SYMBOL_GPL(iotlb_cr_to_e); | 155 | EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e); |
140 | 156 | ||
141 | static inline int iotlb_cr_valid(struct cr_regs *cr) | 157 | static inline int iotlb_cr_valid(struct cr_regs *cr) |
142 | { | 158 | { |
@@ -146,7 +162,7 @@ static inline int iotlb_cr_valid(struct cr_regs *cr) | |||
146 | return arch_iommu->cr_valid(cr); | 162 | return arch_iommu->cr_valid(cr); |
147 | } | 163 | } |
148 | 164 | ||
149 | static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj, | 165 | static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj, |
150 | struct iotlb_entry *e) | 166 | struct iotlb_entry *e) |
151 | { | 167 | { |
152 | if (!e) | 168 | if (!e) |
@@ -155,23 +171,22 @@ static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj, | |||
155 | return arch_iommu->alloc_cr(obj, e); | 171 | return arch_iommu->alloc_cr(obj, e); |
156 | } | 172 | } |
157 | 173 | ||
158 | u32 iotlb_cr_to_virt(struct cr_regs *cr) | 174 | static u32 iotlb_cr_to_virt(struct cr_regs *cr) |
159 | { | 175 | { |
160 | return arch_iommu->cr_to_virt(cr); | 176 | return arch_iommu->cr_to_virt(cr); |
161 | } | 177 | } |
162 | EXPORT_SYMBOL_GPL(iotlb_cr_to_virt); | ||
163 | 178 | ||
164 | static u32 get_iopte_attr(struct iotlb_entry *e) | 179 | static u32 get_iopte_attr(struct iotlb_entry *e) |
165 | { | 180 | { |
166 | return arch_iommu->get_pte_attr(e); | 181 | return arch_iommu->get_pte_attr(e); |
167 | } | 182 | } |
168 | 183 | ||
169 | static u32 iommu_report_fault(struct iommu *obj, u32 *da) | 184 | static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da) |
170 | { | 185 | { |
171 | return arch_iommu->fault_isr(obj, da); | 186 | return arch_iommu->fault_isr(obj, da); |
172 | } | 187 | } |
173 | 188 | ||
174 | static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l) | 189 | static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l) |
175 | { | 190 | { |
176 | u32 val; | 191 | u32 val; |
177 | 192 | ||
@@ -182,7 +197,7 @@ static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l) | |||
182 | 197 | ||
183 | } | 198 | } |
184 | 199 | ||
185 | static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l) | 200 | static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l) |
186 | { | 201 | { |
187 | u32 val; | 202 | u32 val; |
188 | 203 | ||
@@ -192,12 +207,12 @@ static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l) | |||
192 | iommu_write_reg(obj, val, MMU_LOCK); | 207 | iommu_write_reg(obj, val, MMU_LOCK); |
193 | } | 208 | } |
194 | 209 | ||
195 | static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr) | 210 | static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr) |
196 | { | 211 | { |
197 | arch_iommu->tlb_read_cr(obj, cr); | 212 | arch_iommu->tlb_read_cr(obj, cr); |
198 | } | 213 | } |
199 | 214 | ||
200 | static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr) | 215 | static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr) |
201 | { | 216 | { |
202 | arch_iommu->tlb_load_cr(obj, cr); | 217 | arch_iommu->tlb_load_cr(obj, cr); |
203 | 218 | ||
@@ -211,7 +226,7 @@ static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr) | |||
211 | * @cr: contents of cam and ram register | 226 | * @cr: contents of cam and ram register |
212 | * @buf: output buffer | 227 | * @buf: output buffer |
213 | **/ | 228 | **/ |
214 | static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr, | 229 | static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, |
215 | char *buf) | 230 | char *buf) |
216 | { | 231 | { |
217 | BUG_ON(!cr || !buf); | 232 | BUG_ON(!cr || !buf); |
@@ -220,7 +235,7 @@ static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr, | |||
220 | } | 235 | } |
221 | 236 | ||
222 | /* only used in iotlb iteration for-loop */ | 237 | /* only used in iotlb iteration for-loop */ |
223 | static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n) | 238 | static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n) |
224 | { | 239 | { |
225 | struct cr_regs cr; | 240 | struct cr_regs cr; |
226 | struct iotlb_lock l; | 241 | struct iotlb_lock l; |
@@ -238,7 +253,8 @@ static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n) | |||
238 | * @obj: target iommu | 253 | * @obj: target iommu |
239 | * @e: an iommu tlb entry info | 254 | * @e: an iommu tlb entry info |
240 | **/ | 255 | **/ |
241 | int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e) | 256 | #ifdef PREFETCH_IOTLB |
257 | static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) | ||
242 | { | 258 | { |
243 | int err = 0; | 259 | int err = 0; |
244 | struct iotlb_lock l; | 260 | struct iotlb_lock l; |
@@ -294,7 +310,20 @@ out: | |||
294 | clk_disable(obj->clk); | 310 | clk_disable(obj->clk); |
295 | return err; | 311 | return err; |
296 | } | 312 | } |
297 | EXPORT_SYMBOL_GPL(load_iotlb_entry); | 313 | |
314 | #else /* !PREFETCH_IOTLB */ | ||
315 | |||
316 | static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) | ||
317 | { | ||
318 | return 0; | ||
319 | } | ||
320 | |||
321 | #endif /* !PREFETCH_IOTLB */ | ||
322 | |||
323 | static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) | ||
324 | { | ||
325 | return load_iotlb_entry(obj, e); | ||
326 | } | ||
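The hunk above demotes the exported load_iotlb_entry() to a static that only does real work under PREFETCH_IOTLB, with a no-op stub otherwise, so callers stay free of #ifdefs. A minimal sketch of the same compile-out idiom (all names hypothetical):

    #ifdef PREFETCH_FOO
    static int prefetch_foo(struct foo *f)
    {
            return really_prefetch(f);
    }
    #else
    static int prefetch_foo(struct foo *f)
    {
            return 0;       /* feature compiled out; callers need no #ifdef */
    }
    #endif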
298 | 327 | ||
299 | /** | 328 | /** |
300 | * flush_iotlb_page - Clear an iommu tlb entry | 329 | * flush_iotlb_page - Clear an iommu tlb entry |
@@ -303,7 +332,7 @@ EXPORT_SYMBOL_GPL(load_iotlb_entry); | |||
303 | * | 332 | * |
304 | * Clear an iommu tlb entry which includes 'da' address. | 333 | * Clear an iommu tlb entry which includes 'da' address. |
305 | **/ | 334 | **/ |
306 | void flush_iotlb_page(struct iommu *obj, u32 da) | 335 | static void flush_iotlb_page(struct omap_iommu *obj, u32 da) |
307 | { | 336 | { |
308 | int i; | 337 | int i; |
309 | struct cr_regs cr; | 338 | struct cr_regs cr; |
@@ -332,33 +361,12 @@ void flush_iotlb_page(struct iommu *obj, u32 da) | |||
332 | if (i == obj->nr_tlb_entries) | 361 | if (i == obj->nr_tlb_entries) |
333 | dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da); | 362 | dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da); |
334 | } | 363 | } |
335 | EXPORT_SYMBOL_GPL(flush_iotlb_page); | ||
336 | |||
337 | /** | ||
338 | * flush_iotlb_range - Clear an iommu tlb entries | ||
339 | * @obj: target iommu | ||
340 | * @start: iommu device virtual address(start) | ||
341 | * @end: iommu device virtual address(end) | ||
342 | * | ||
343 | * Clear an iommu tlb entry which includes 'da' address. | ||
344 | **/ | ||
345 | void flush_iotlb_range(struct iommu *obj, u32 start, u32 end) | ||
346 | { | ||
347 | u32 da = start; | ||
348 | |||
349 | while (da < end) { | ||
350 | flush_iotlb_page(obj, da); | ||
351 | /* FIXME: Optimize for multiple page size */ | ||
352 | da += IOPTE_SIZE; | ||
353 | } | ||
354 | } | ||
355 | EXPORT_SYMBOL_GPL(flush_iotlb_range); | ||
356 | 364 | ||
357 | /** | 365 | /** |
358 | * flush_iotlb_all - Clear all iommu tlb entries | 366 | * flush_iotlb_all - Clear all iommu tlb entries |
359 | * @obj: target iommu | 367 | * @obj: target iommu |
360 | **/ | 368 | **/ |
361 | void flush_iotlb_all(struct iommu *obj) | 369 | static void flush_iotlb_all(struct omap_iommu *obj) |
362 | { | 370 | { |
363 | struct iotlb_lock l; | 371 | struct iotlb_lock l; |
364 | 372 | ||
@@ -372,28 +380,10 @@ void flush_iotlb_all(struct iommu *obj) | |||
372 | 380 | ||
373 | clk_disable(obj->clk); | 381 | clk_disable(obj->clk); |
374 | } | 382 | } |
375 | EXPORT_SYMBOL_GPL(flush_iotlb_all); | ||
376 | |||
377 | /** | ||
378 | * iommu_set_twl - enable/disable table walking logic | ||
379 | * @obj: target iommu | ||
380 | * @on: enable/disable | ||
381 | * | ||
382 | * Function used to enable/disable TWL. If one wants to work | ||
383 | * exclusively with locked TLB entries and receive notifications | ||
384 | * for TLB miss then call this function to disable TWL. | ||
385 | */ | ||
386 | void iommu_set_twl(struct iommu *obj, bool on) | ||
387 | { | ||
388 | clk_enable(obj->clk); | ||
389 | arch_iommu->set_twl(obj, on); | ||
390 | clk_disable(obj->clk); | ||
391 | } | ||
392 | EXPORT_SYMBOL_GPL(iommu_set_twl); | ||
393 | 383 | ||
394 | #if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE) | 384 | #if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE) |
395 | 385 | ||
396 | ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes) | 386 | ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes) |
397 | { | 387 | { |
398 | if (!obj || !buf) | 388 | if (!obj || !buf) |
399 | return -EINVAL; | 389 | return -EINVAL; |
@@ -406,9 +396,10 @@ ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes) | |||
406 | 396 | ||
407 | return bytes; | 397 | return bytes; |
408 | } | 398 | } |
409 | EXPORT_SYMBOL_GPL(iommu_dump_ctx); | 399 | EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx); |
410 | 400 | ||
411 | static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num) | 401 | static int |
402 | __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num) | ||
412 | { | 403 | { |
413 | int i; | 404 | int i; |
414 | struct iotlb_lock saved; | 405 | struct iotlb_lock saved; |
@@ -431,11 +422,11 @@ static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num) | |||
431 | } | 422 | } |
432 | 423 | ||
433 | /** | 424 | /** |
434 | * dump_tlb_entries - dump cr arrays to given buffer | 425 | * omap_dump_tlb_entries - dump cr arrays to given buffer |
435 | * @obj: target iommu | 426 | * @obj: target iommu |
436 | * @buf: output buffer | 427 | * @buf: output buffer |
437 | **/ | 428 | **/ |
438 | size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes) | 429 | size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes) |
439 | { | 430 | { |
440 | int i, num; | 431 | int i, num; |
441 | struct cr_regs *cr; | 432 | struct cr_regs *cr; |
@@ -455,14 +446,14 @@ size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes) | |||
455 | 446 | ||
456 | return p - buf; | 447 | return p - buf; |
457 | } | 448 | } |
458 | EXPORT_SYMBOL_GPL(dump_tlb_entries); | 449 | EXPORT_SYMBOL_GPL(omap_dump_tlb_entries); |
459 | 450 | ||
460 | int foreach_iommu_device(void *data, int (*fn)(struct device *, void *)) | 451 | int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *)) |
461 | { | 452 | { |
462 | return driver_for_each_device(&omap_iommu_driver.driver, | 453 | return driver_for_each_device(&omap_iommu_driver.driver, |
463 | NULL, data, fn); | 454 | NULL, data, fn); |
464 | } | 455 | } |
465 | EXPORT_SYMBOL_GPL(foreach_iommu_device); | 456 | EXPORT_SYMBOL_GPL(omap_foreach_iommu_device); |
466 | 457 | ||
467 | #endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */ | 458 | #endif /* CONFIG_OMAP_IOMMU_DEBUG || CONFIG_OMAP_IOMMU_DEBUG_MODULE */ |
468 | 459 | ||
@@ -495,7 +486,7 @@ static void iopte_free(u32 *iopte) | |||
495 | kmem_cache_free(iopte_cachep, iopte); | 486 | kmem_cache_free(iopte_cachep, iopte); |
496 | } | 487 | } |
497 | 488 | ||
498 | static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da) | 489 | static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da) |
499 | { | 490 | { |
500 | u32 *iopte; | 491 | u32 *iopte; |
501 | 492 | ||
@@ -533,7 +524,7 @@ pte_ready: | |||
533 | return iopte; | 524 | return iopte; |
534 | } | 525 | } |
535 | 526 | ||
536 | static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot) | 527 | static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) |
537 | { | 528 | { |
538 | u32 *iopgd = iopgd_offset(obj, da); | 529 | u32 *iopgd = iopgd_offset(obj, da); |
539 | 530 | ||
@@ -548,7 +539,7 @@ static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot) | |||
548 | return 0; | 539 | return 0; |
549 | } | 540 | } |
550 | 541 | ||
551 | static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot) | 542 | static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) |
552 | { | 543 | { |
553 | u32 *iopgd = iopgd_offset(obj, da); | 544 | u32 *iopgd = iopgd_offset(obj, da); |
554 | int i; | 545 | int i; |
@@ -565,7 +556,7 @@ static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot) | |||
565 | return 0; | 556 | return 0; |
566 | } | 557 | } |
567 | 558 | ||
568 | static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot) | 559 | static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) |
569 | { | 560 | { |
570 | u32 *iopgd = iopgd_offset(obj, da); | 561 | u32 *iopgd = iopgd_offset(obj, da); |
571 | u32 *iopte = iopte_alloc(obj, iopgd, da); | 562 | u32 *iopte = iopte_alloc(obj, iopgd, da); |
@@ -582,7 +573,7 @@ static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot) | |||
582 | return 0; | 573 | return 0; |
583 | } | 574 | } |
584 | 575 | ||
585 | static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot) | 576 | static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) |
586 | { | 577 | { |
587 | u32 *iopgd = iopgd_offset(obj, da); | 578 | u32 *iopgd = iopgd_offset(obj, da); |
588 | u32 *iopte = iopte_alloc(obj, iopgd, da); | 579 | u32 *iopte = iopte_alloc(obj, iopgd, da); |
@@ -603,9 +594,10 @@ static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot) | |||
603 | return 0; | 594 | return 0; |
604 | } | 595 | } |
605 | 596 | ||
606 | static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e) | 597 | static int |
598 | iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e) | ||
607 | { | 599 | { |
608 | int (*fn)(struct iommu *, u32, u32, u32); | 600 | int (*fn)(struct omap_iommu *, u32, u32, u32); |
609 | u32 prot; | 601 | u32 prot; |
610 | int err; | 602 | int err; |
611 | 603 | ||
@@ -641,23 +633,21 @@ static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e) | |||
641 | } | 633 | } |
642 | 634 | ||
643 | /** | 635 | /** |
644 | * iopgtable_store_entry - Make an iommu pte entry | 636 | * omap_iopgtable_store_entry - Make an iommu pte entry |
645 | * @obj: target iommu | 637 | * @obj: target iommu |
646 | * @e: an iommu tlb entry info | 638 | * @e: an iommu tlb entry info |
647 | **/ | 639 | **/ |
648 | int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e) | 640 | int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e) |
649 | { | 641 | { |
650 | int err; | 642 | int err; |
651 | 643 | ||
652 | flush_iotlb_page(obj, e->da); | 644 | flush_iotlb_page(obj, e->da); |
653 | err = iopgtable_store_entry_core(obj, e); | 645 | err = iopgtable_store_entry_core(obj, e); |
654 | #ifdef PREFETCH_IOTLB | ||
655 | if (!err) | 646 | if (!err) |
656 | load_iotlb_entry(obj, e); | 647 | prefetch_iotlb_entry(obj, e); |
657 | #endif | ||
658 | return err; | 648 | return err; |
659 | } | 649 | } |
660 | EXPORT_SYMBOL_GPL(iopgtable_store_entry); | 650 | EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry); |
661 | 651 | ||
662 | /** | 652 | /** |
663 | * iopgtable_lookup_entry - Lookup an iommu pte entry | 653 | * iopgtable_lookup_entry - Lookup an iommu pte entry |
@@ -666,7 +656,8 @@ EXPORT_SYMBOL_GPL(iopgtable_store_entry); | |||
666 | * @ppgd: iommu pgd entry pointer to be returned | 656 | * @ppgd: iommu pgd entry pointer to be returned |
667 | * @ppte: iommu pte entry pointer to be returned | 657 | * @ppte: iommu pte entry pointer to be returned |
668 | **/ | 658 | **/ |
669 | void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte) | 659 | static void |
660 | iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte) | ||
670 | { | 661 | { |
671 | u32 *iopgd, *iopte = NULL; | 662 | u32 *iopgd, *iopte = NULL; |
672 | 663 | ||
@@ -680,9 +671,8 @@ out: | |||
680 | *ppgd = iopgd; | 671 | *ppgd = iopgd; |
681 | *ppte = iopte; | 672 | *ppte = iopte; |
682 | } | 673 | } |
683 | EXPORT_SYMBOL_GPL(iopgtable_lookup_entry); | ||
684 | 674 | ||
685 | static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da) | 675 | static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da) |
686 | { | 676 | { |
687 | size_t bytes; | 677 | size_t bytes; |
688 | u32 *iopgd = iopgd_offset(obj, da); | 678 | u32 *iopgd = iopgd_offset(obj, da); |
@@ -735,7 +725,7 @@ out: | |||
735 | * @obj: target iommu | 725 | * @obj: target iommu |
736 | * @da: iommu device virtual address | 726 | * @da: iommu device virtual address |
737 | **/ | 727 | **/ |
738 | size_t iopgtable_clear_entry(struct iommu *obj, u32 da) | 728 | static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da) |
739 | { | 729 | { |
740 | size_t bytes; | 730 | size_t bytes; |
741 | 731 | ||
@@ -748,9 +738,8 @@ size_t iopgtable_clear_entry(struct iommu *obj, u32 da) | |||
748 | 738 | ||
749 | return bytes; | 739 | return bytes; |
750 | } | 740 | } |
751 | EXPORT_SYMBOL_GPL(iopgtable_clear_entry); | ||
752 | 741 | ||
753 | static void iopgtable_clear_entry_all(struct iommu *obj) | 742 | static void iopgtable_clear_entry_all(struct omap_iommu *obj) |
754 | { | 743 | { |
755 | int i; | 744 | int i; |
756 | 745 | ||
@@ -785,7 +774,8 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) | |||
785 | { | 774 | { |
786 | u32 da, errs; | 775 | u32 da, errs; |
787 | u32 *iopgd, *iopte; | 776 | u32 *iopgd, *iopte; |
788 | struct iommu *obj = data; | 777 | struct omap_iommu *obj = data; |
778 | struct iommu_domain *domain = obj->domain; | ||
789 | 779 | ||
790 | if (!obj->refcount) | 780 | if (!obj->refcount) |
791 | return IRQ_NONE; | 781 | return IRQ_NONE; |
@@ -797,7 +787,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) | |||
797 | return IRQ_HANDLED; | 787 | return IRQ_HANDLED; |
798 | 788 | ||
799 | /* Fault callback or TLB/PTE Dynamic loading */ | 789 | /* Fault callback or TLB/PTE Dynamic loading */ |
800 | if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv)) | 790 | if (!report_iommu_fault(domain, obj->dev, da, 0)) |
801 | return IRQ_HANDLED; | 791 | return IRQ_HANDLED; |
802 | 792 | ||
803 | iommu_disable(obj); | 793 | iommu_disable(obj); |
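With the private isr/isr_priv hooks gone, faults are funneled through report_iommu_fault(), which runs whatever handler a client installed on the domain; a zero return means the fault was handled and the IRQ path stops here. A hedged client-side sketch, assuming the generic fault-handler API introduced alongside report_iommu_fault():

    /* return 0 if handled; nonzero lets the driver dump state and disable */
    static int my_fault_handler(struct iommu_domain *domain,
                                struct device *dev, unsigned long iova,
                                int flags)
    {
            dev_err(dev, "unrecoverable iommu fault at 0x%lx\n", iova);
            return -ENOSYS;
    }

    iommu_set_fault_handler(domain, my_fault_handler);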
@@ -821,7 +811,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) | |||
821 | 811 | ||
822 | static int device_match_by_alias(struct device *dev, void *data) | 812 | static int device_match_by_alias(struct device *dev, void *data) |
823 | { | 813 | { |
824 | struct iommu *obj = to_iommu(dev); | 814 | struct omap_iommu *obj = to_iommu(dev); |
825 | const char *name = data; | 815 | const char *name = data; |
826 | 816 | ||
827 | pr_debug("%s: %s %s\n", __func__, obj->name, name); | 817 | pr_debug("%s: %s %s\n", __func__, obj->name, name); |
@@ -830,57 +820,55 @@ static int device_match_by_alias(struct device *dev, void *data) | |||
830 | } | 820 | } |
831 | 821 | ||
832 | /** | 822 | /** |
833 | * iommu_set_da_range - Set a valid device address range | 823 | * omap_find_iommu_device() - find an omap iommu device by name |
834 | * @obj: target iommu | 824 | * @name: name of the iommu device |
835 | * @start Start of valid range | 825 | * |
836 | * @end End of valid range | 826 | * The generic iommu API requires the caller to provide the device |
837 | **/ | 827 | * it wishes to attach to a certain iommu domain. |
838 | int iommu_set_da_range(struct iommu *obj, u32 start, u32 end) | 828 | * |
829 | * Drivers generally should not bother with this as it should just | ||
830 | * be taken care of by the DMA-API using dev_archdata. | ||
831 | * | ||
832 | * This function is provided as an interim solution until the latter | ||
833 | * materializes, and omap3isp is fully migrated to the DMA-API. | ||
834 | */ | ||
835 | struct device *omap_find_iommu_device(const char *name) | ||
839 | { | 836 | { |
840 | 837 | return driver_find_device(&omap_iommu_driver.driver, NULL, | |
841 | if (!obj) | 838 | (void *)name, |
842 | return -EFAULT; | 839 | device_match_by_alias); |
843 | |||
844 | if (end < start || !PAGE_ALIGN(start | end)) | ||
845 | return -EINVAL; | ||
846 | |||
847 | obj->da_start = start; | ||
848 | obj->da_end = end; | ||
849 | |||
850 | return 0; | ||
851 | } | 840 | } |
852 | EXPORT_SYMBOL_GPL(iommu_set_da_range); | 841 | EXPORT_SYMBOL_GPL(omap_find_iommu_device); |
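A hedged usage sketch for the interim lookup helper above ("isp" is the alias omap3isp uses; error handling trimmed):

    struct device *dev;

    dev = omap_find_iommu_device("isp");
    if (!dev)
            return -ENODEV;
    /* 'dev' is then handed to iommu_attach_device() on a freshly
     * allocated domain; see the isp.c hunks further down. */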
853 | 842 | ||
854 | /** | 843 | /** |
855 | * iommu_get - Get iommu handler | 844 | * omap_iommu_attach() - attach iommu device to an iommu domain |
856 | * @name: target iommu name | 845 | * @dev: target omap iommu device |
846 | * @iopgd: page table | ||
857 | **/ | 847 | **/ |
858 | struct iommu *iommu_get(const char *name) | 848 | static struct omap_iommu *omap_iommu_attach(struct device *dev, u32 *iopgd) |
859 | { | 849 | { |
860 | int err = -ENOMEM; | 850 | int err = -ENOMEM; |
861 | struct device *dev; | 851 | struct omap_iommu *obj = to_iommu(dev); |
862 | struct iommu *obj; | ||
863 | |||
864 | dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name, | ||
865 | device_match_by_alias); | ||
866 | if (!dev) | ||
867 | return ERR_PTR(-ENODEV); | ||
868 | |||
869 | obj = to_iommu(dev); | ||
870 | 852 | ||
871 | mutex_lock(&obj->iommu_lock); | 853 | spin_lock(&obj->iommu_lock); |
872 | 854 | ||
873 | if (obj->refcount++ == 0) { | 855 | /* an iommu device can only be attached once */ |
874 | err = iommu_enable(obj); | 856 | if (++obj->refcount > 1) { |
875 | if (err) | 857 | dev_err(dev, "%s: already attached!\n", obj->name); |
876 | goto err_enable; | 858 | err = -EBUSY; |
877 | flush_iotlb_all(obj); | 859 | goto err_enable; |
878 | } | 860 | } |
879 | 861 | ||
862 | obj->iopgd = iopgd; | ||
863 | err = iommu_enable(obj); | ||
864 | if (err) | ||
865 | goto err_enable; | ||
866 | flush_iotlb_all(obj); | ||
867 | |||
880 | if (!try_module_get(obj->owner)) | 868 | if (!try_module_get(obj->owner)) |
881 | goto err_module; | 869 | goto err_module; |
882 | 870 | ||
883 | mutex_unlock(&obj->iommu_lock); | 871 | spin_unlock(&obj->iommu_lock); |
884 | 872 | ||
885 | dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); | 873 | dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); |
886 | return obj; | 874 | return obj; |
@@ -890,59 +878,32 @@ err_module: | |||
890 | iommu_disable(obj); | 878 | iommu_disable(obj); |
891 | err_enable: | 879 | err_enable: |
892 | obj->refcount--; | 880 | obj->refcount--; |
893 | mutex_unlock(&obj->iommu_lock); | 881 | spin_unlock(&obj->iommu_lock); |
894 | return ERR_PTR(err); | 882 | return ERR_PTR(err); |
895 | } | 883 | } |
896 | EXPORT_SYMBOL_GPL(iommu_get); | ||
897 | 884 | ||
898 | /** | 885 | /** |
899 | * iommu_put - Put back iommu handler | 886 | * omap_iommu_detach - release iommu device |
900 | * @obj: target iommu | 887 | * @obj: target iommu |
901 | **/ | 888 | **/ |
902 | void iommu_put(struct iommu *obj) | 889 | static void omap_iommu_detach(struct omap_iommu *obj) |
903 | { | 890 | { |
904 | if (!obj || IS_ERR(obj)) | 891 | if (!obj || IS_ERR(obj)) |
905 | return; | 892 | return; |
906 | 893 | ||
907 | mutex_lock(&obj->iommu_lock); | 894 | spin_lock(&obj->iommu_lock); |
908 | 895 | ||
909 | if (--obj->refcount == 0) | 896 | if (--obj->refcount == 0) |
910 | iommu_disable(obj); | 897 | iommu_disable(obj); |
911 | 898 | ||
912 | module_put(obj->owner); | 899 | module_put(obj->owner); |
913 | 900 | ||
914 | mutex_unlock(&obj->iommu_lock); | 901 | obj->iopgd = NULL; |
915 | |||
916 | dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); | ||
917 | } | ||
918 | EXPORT_SYMBOL_GPL(iommu_put); | ||
919 | |||
920 | int iommu_set_isr(const char *name, | ||
921 | int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs, | ||
922 | void *priv), | ||
923 | void *isr_priv) | ||
924 | { | ||
925 | struct device *dev; | ||
926 | struct iommu *obj; | ||
927 | 902 | ||
928 | dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name, | 903 | spin_unlock(&obj->iommu_lock); |
929 | device_match_by_alias); | ||
930 | if (!dev) | ||
931 | return -ENODEV; | ||
932 | 904 | ||
933 | obj = to_iommu(dev); | 905 | dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); |
934 | mutex_lock(&obj->iommu_lock); | ||
935 | if (obj->refcount != 0) { | ||
936 | mutex_unlock(&obj->iommu_lock); | ||
937 | return -EBUSY; | ||
938 | } | ||
939 | obj->isr = isr; | ||
940 | obj->isr_priv = isr_priv; | ||
941 | mutex_unlock(&obj->iommu_lock); | ||
942 | |||
943 | return 0; | ||
944 | } | 906 | } |
945 | EXPORT_SYMBOL_GPL(iommu_set_isr); | ||
946 | 907 | ||
947 | /* | 908 | /* |
948 | * OMAP Device MMU(IOMMU) detection | 909 | * OMAP Device MMU(IOMMU) detection |
@@ -950,9 +911,8 @@ EXPORT_SYMBOL_GPL(iommu_set_isr); | |||
950 | static int __devinit omap_iommu_probe(struct platform_device *pdev) | 911 | static int __devinit omap_iommu_probe(struct platform_device *pdev) |
951 | { | 912 | { |
952 | int err = -ENODEV; | 913 | int err = -ENODEV; |
953 | void *p; | ||
954 | int irq; | 914 | int irq; |
955 | struct iommu *obj; | 915 | struct omap_iommu *obj; |
956 | struct resource *res; | 916 | struct resource *res; |
957 | struct iommu_platform_data *pdata = pdev->dev.platform_data; | 917 | struct iommu_platform_data *pdata = pdev->dev.platform_data; |
958 | 918 | ||
@@ -974,7 +934,7 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev) | |||
974 | obj->da_start = pdata->da_start; | 934 | obj->da_start = pdata->da_start; |
975 | obj->da_end = pdata->da_end; | 935 | obj->da_end = pdata->da_end; |
976 | 936 | ||
977 | mutex_init(&obj->iommu_lock); | 937 | spin_lock_init(&obj->iommu_lock); |
978 | mutex_init(&obj->mmap_lock); | 938 | mutex_init(&obj->mmap_lock); |
979 | spin_lock_init(&obj->page_table_lock); | 939 | spin_lock_init(&obj->page_table_lock); |
980 | INIT_LIST_HEAD(&obj->mmap); | 940 | INIT_LIST_HEAD(&obj->mmap); |
@@ -1009,22 +969,9 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev) | |||
1009 | goto err_irq; | 969 | goto err_irq; |
1010 | platform_set_drvdata(pdev, obj); | 970 | platform_set_drvdata(pdev, obj); |
1011 | 971 | ||
1012 | p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE)); | ||
1013 | if (!p) { | ||
1014 | err = -ENOMEM; | ||
1015 | goto err_pgd; | ||
1016 | } | ||
1017 | memset(p, 0, IOPGD_TABLE_SIZE); | ||
1018 | clean_dcache_area(p, IOPGD_TABLE_SIZE); | ||
1019 | obj->iopgd = p; | ||
1020 | |||
1021 | BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE)); | ||
1022 | |||
1023 | dev_info(&pdev->dev, "%s registered\n", obj->name); | 972 | dev_info(&pdev->dev, "%s registered\n", obj->name); |
1024 | return 0; | 973 | return 0; |
1025 | 974 | ||
1026 | err_pgd: | ||
1027 | free_irq(irq, obj); | ||
1028 | err_irq: | 975 | err_irq: |
1029 | iounmap(obj->regbase); | 976 | iounmap(obj->regbase); |
1030 | err_ioremap: | 977 | err_ioremap: |
@@ -1040,12 +987,11 @@ static int __devexit omap_iommu_remove(struct platform_device *pdev) | |||
1040 | { | 987 | { |
1041 | int irq; | 988 | int irq; |
1042 | struct resource *res; | 989 | struct resource *res; |
1043 | struct iommu *obj = platform_get_drvdata(pdev); | 990 | struct omap_iommu *obj = platform_get_drvdata(pdev); |
1044 | 991 | ||
1045 | platform_set_drvdata(pdev, NULL); | 992 | platform_set_drvdata(pdev, NULL); |
1046 | 993 | ||
1047 | iopgtable_clear_entry_all(obj); | 994 | iopgtable_clear_entry_all(obj); |
1048 | free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE)); | ||
1049 | 995 | ||
1050 | irq = platform_get_irq(pdev, 0); | 996 | irq = platform_get_irq(pdev, 0); |
1051 | free_irq(irq, obj); | 997 | free_irq(irq, obj); |
@@ -1072,6 +1018,201 @@ static void iopte_cachep_ctor(void *iopte) | |||
1072 | clean_dcache_area(iopte, IOPTE_TABLE_SIZE); | 1018 | clean_dcache_area(iopte, IOPTE_TABLE_SIZE); |
1073 | } | 1019 | } |
1074 | 1020 | ||
1021 | static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, | ||
1022 | phys_addr_t pa, int order, int prot) | ||
1023 | { | ||
1024 | struct omap_iommu_domain *omap_domain = domain->priv; | ||
1025 | struct omap_iommu *oiommu = omap_domain->iommu_dev; | ||
1026 | struct device *dev = oiommu->dev; | ||
1027 | size_t bytes = PAGE_SIZE << order; | ||
1028 | struct iotlb_entry e; | ||
1029 | int omap_pgsz; | ||
1030 | u32 ret, flags; | ||
1031 | |||
1032 | /* we only support mapping a single iommu page for now */ | ||
1033 | omap_pgsz = bytes_to_iopgsz(bytes); | ||
1034 | if (omap_pgsz < 0) { | ||
1035 | dev_err(dev, "invalid size to map: %zu\n", bytes); | ||
1036 | return -EINVAL; | ||
1037 | } | ||
1038 | |||
1039 | dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%zx\n", da, pa, bytes); | ||
1040 | |||
1041 | flags = omap_pgsz | prot; | ||
1042 | |||
1043 | iotlb_init_entry(&e, da, pa, flags); | ||
1044 | |||
1045 | ret = omap_iopgtable_store_entry(oiommu, &e); | ||
1046 | if (ret) | ||
1047 | dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret); | ||
1048 | |||
1049 | return ret; | ||
1050 | } | ||
1051 | |||
1052 | static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da, | ||
1053 | int order) | ||
1054 | { | ||
1055 | struct omap_iommu_domain *omap_domain = domain->priv; | ||
1056 | struct omap_iommu *oiommu = omap_domain->iommu_dev; | ||
1057 | struct device *dev = oiommu->dev; | ||
1058 | size_t unmap_size; | ||
1059 | |||
1060 | dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order); | ||
1061 | |||
1062 | unmap_size = iopgtable_clear_entry(oiommu, da); | ||
1063 | |||
1064 | return unmap_size ? get_order(unmap_size) : -EINVAL; | ||
1065 | } | ||
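Note that the .map/.unmap callbacks above take a page order rather than a byte count, matching the generic API of this kernel generation. A hedged caller-side sketch of that call shape ('domain', 'da' and 'pa' assumed set up; the prot bits shown are the generic IOMMU_* flags, not omap-specific attributes):

    int err;

    /* order 0 == one PAGE_SIZE page */
    err = iommu_map(domain, da, pa, 0, IOMMU_READ | IOMMU_WRITE);
    if (err)
            return err;
    /* ... use the mapping ... */
    err = iommu_unmap(domain, da, 0);  /* unmapped order, or negative errno */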
1066 | |||
1067 | static int | ||
1068 | omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) | ||
1069 | { | ||
1070 | struct omap_iommu_domain *omap_domain = domain->priv; | ||
1071 | struct omap_iommu *oiommu; | ||
1072 | int ret = 0; | ||
1073 | |||
1074 | spin_lock(&omap_domain->lock); | ||
1075 | |||
1076 | /* only a single device is supported per domain for now */ | ||
1077 | if (omap_domain->iommu_dev) { | ||
1078 | dev_err(dev, "iommu domain is already attached\n"); | ||
1079 | ret = -EBUSY; | ||
1080 | goto out; | ||
1081 | } | ||
1082 | |||
1083 | /* get a handle to and enable the omap iommu */ | ||
1084 | oiommu = omap_iommu_attach(dev, omap_domain->pgtable); | ||
1085 | if (IS_ERR(oiommu)) { | ||
1086 | ret = PTR_ERR(oiommu); | ||
1087 | dev_err(dev, "can't get omap iommu: %d\n", ret); | ||
1088 | goto out; | ||
1089 | } | ||
1090 | |||
1091 | omap_domain->iommu_dev = oiommu; | ||
1092 | oiommu->domain = domain; | ||
1093 | |||
1094 | out: | ||
1095 | spin_unlock(&omap_domain->lock); | ||
1096 | return ret; | ||
1097 | } | ||
1098 | |||
1099 | static void omap_iommu_detach_dev(struct iommu_domain *domain, | ||
1100 | struct device *dev) | ||
1101 | { | ||
1102 | struct omap_iommu_domain *omap_domain = domain->priv; | ||
1103 | struct omap_iommu *oiommu = to_iommu(dev); | ||
1104 | |||
1105 | spin_lock(&omap_domain->lock); | ||
1106 | |||
1107 | /* only a single device is supported per domain for now */ | ||
1108 | if (omap_domain->iommu_dev != oiommu) { | ||
1109 | dev_err(dev, "invalid iommu device\n"); | ||
1110 | goto out; | ||
1111 | } | ||
1112 | |||
1113 | iopgtable_clear_entry_all(oiommu); | ||
1114 | |||
1115 | omap_iommu_detach(oiommu); | ||
1116 | |||
1117 | omap_domain->iommu_dev = NULL; | ||
1118 | |||
1119 | out: | ||
1120 | spin_unlock(&omap_domain->lock); | ||
1121 | } | ||
1122 | |||
1123 | static int omap_iommu_domain_init(struct iommu_domain *domain) | ||
1124 | { | ||
1125 | struct omap_iommu_domain *omap_domain; | ||
1126 | |||
1127 | omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL); | ||
1128 | if (!omap_domain) { | ||
1129 | pr_err("kzalloc failed\n"); | ||
1130 | goto out; | ||
1131 | } | ||
1132 | |||
1133 | omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL); | ||
1134 | if (!omap_domain->pgtable) { | ||
1135 | pr_err("kzalloc failed\n"); | ||
1136 | goto fail_nomem; | ||
1137 | } | ||
1138 | |||
1139 | /* | ||
1140 | * should never fail, but please keep this around to ensure | ||
1141 | * we keep the hardware happy | ||
1142 | */ | ||
1143 | BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE)); | ||
1144 | |||
1145 | clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE); | ||
1146 | spin_lock_init(&omap_domain->lock); | ||
1147 | |||
1148 | domain->priv = omap_domain; | ||
1149 | |||
1150 | return 0; | ||
1151 | |||
1152 | fail_nomem: | ||
1153 | kfree(omap_domain); | ||
1154 | out: | ||
1155 | return -ENOMEM; | ||
1156 | } | ||
1157 | |||
1158 | /* assume device was already detached */ | ||
1159 | static void omap_iommu_domain_destroy(struct iommu_domain *domain) | ||
1160 | { | ||
1161 | struct omap_iommu_domain *omap_domain = domain->priv; | ||
1162 | |||
1163 | domain->priv = NULL; | ||
1164 | |||
1165 | kfree(omap_domain->pgtable); | ||
1166 | kfree(omap_domain); | ||
1167 | } | ||
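domain_init()/domain_destroy() above are reached through the generic allocator; a hedged sketch of the consuming side (bus choice assumed to be the platform bus, as registered in omap_iommu_init() below):

    struct iommu_domain *domain;

    domain = iommu_domain_alloc(&platform_bus_type); /* -> omap_iommu_domain_init() */
    if (!domain)
            return -ENOMEM;
    /* ... attach, map, unmap ... */
    iommu_domain_free(domain);                       /* -> omap_iommu_domain_destroy() */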
1168 | |||
1169 | static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, | ||
1170 | unsigned long da) | ||
1171 | { | ||
1172 | struct omap_iommu_domain *omap_domain = domain->priv; | ||
1173 | struct omap_iommu *oiommu = omap_domain->iommu_dev; | ||
1174 | struct device *dev = oiommu->dev; | ||
1175 | u32 *pgd, *pte; | ||
1176 | phys_addr_t ret = 0; | ||
1177 | |||
1178 | iopgtable_lookup_entry(oiommu, da, &pgd, &pte); | ||
1179 | |||
1180 | if (pte) { | ||
1181 | if (iopte_is_small(*pte)) | ||
1182 | ret = omap_iommu_translate(*pte, da, IOPTE_MASK); | ||
1183 | else if (iopte_is_large(*pte)) | ||
1184 | ret = omap_iommu_translate(*pte, da, IOLARGE_MASK); | ||
1185 | else | ||
1186 | dev_err(dev, "bogus pte 0x%x", *pte); | ||
1187 | } else { | ||
1188 | if (iopgd_is_section(*pgd)) | ||
1189 | ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK); | ||
1190 | else if (iopgd_is_super(*pgd)) | ||
1191 | ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK); | ||
1192 | else | ||
1193 | dev_err(dev, "bogus pgd 0x%x", *pgd); | ||
1194 | } | ||
1195 | |||
1196 | return ret; | ||
1197 | } | ||
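A hedged lookup example for the translation callback above; the generic wrapper hides the two-level pgd/pte walk ('domain', 'dev' and an unsigned long 'da' assumed from context):

    phys_addr_t pa;

    pa = iommu_iova_to_phys(domain, da);  /* -> omap_iommu_iova_to_phys() */
    if (!pa)
            dev_warn(dev, "no translation for da 0x%lx\n", da);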
1198 | |||
1199 | static int omap_iommu_domain_has_cap(struct iommu_domain *domain, | ||
1200 | unsigned long cap) | ||
1201 | { | ||
1202 | return 0; | ||
1203 | } | ||
1204 | |||
1205 | static struct iommu_ops omap_iommu_ops = { | ||
1206 | .domain_init = omap_iommu_domain_init, | ||
1207 | .domain_destroy = omap_iommu_domain_destroy, | ||
1208 | .attach_dev = omap_iommu_attach_dev, | ||
1209 | .detach_dev = omap_iommu_detach_dev, | ||
1210 | .map = omap_iommu_map, | ||
1211 | .unmap = omap_iommu_unmap, | ||
1212 | .iova_to_phys = omap_iommu_iova_to_phys, | ||
1213 | .domain_has_cap = omap_iommu_domain_has_cap, | ||
1214 | }; | ||
1215 | |||
1075 | static int __init omap_iommu_init(void) | 1216 | static int __init omap_iommu_init(void) |
1076 | { | 1217 | { |
1077 | struct kmem_cache *p; | 1218 | struct kmem_cache *p; |
@@ -1084,6 +1225,8 @@ static int __init omap_iommu_init(void) | |||
1084 | return -ENOMEM; | 1225 | return -ENOMEM; |
1085 | iopte_cachep = p; | 1226 | iopte_cachep = p; |
1086 | 1227 | ||
1228 | bus_set_iommu(&platform_bus_type, &omap_iommu_ops); | ||
1229 | |||
1087 | return platform_driver_register(&omap_iommu_driver); | 1230 | return platform_driver_register(&omap_iommu_driver); |
1088 | } | 1231 | } |
1089 | module_init(omap_iommu_init); | 1232 | module_init(omap_iommu_init); |
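bus_set_iommu() is what ties omap_iommu_ops to every platform device, so a later iommu_domain_alloc(&platform_bus_type) dispatches into this driver. A hedged check a client can make before depending on that wiring (assuming the iommu_present() helper from the same API generation):

    if (!iommu_present(&platform_bus_type))
            return -ENODEV;  /* no iommu driver registered on this bus */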
diff --git a/arch/arm/plat-omap/iovmm.c b/drivers/iommu/omap-iovmm.c index 79e7fedb8602..e8fdb8830f69 100644 --- a/arch/arm/plat-omap/iovmm.c +++ b/drivers/iommu/omap-iovmm.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/vmalloc.h> | 15 | #include <linux/vmalloc.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/scatterlist.h> | 17 | #include <linux/scatterlist.h> |
18 | #include <linux/iommu.h> | ||
18 | 19 | ||
19 | #include <asm/cacheflush.h> | 20 | #include <asm/cacheflush.h> |
20 | #include <asm/mach/map.h> | 21 | #include <asm/mach/map.h> |
@@ -22,44 +23,19 @@ | |||
22 | #include <plat/iommu.h> | 23 | #include <plat/iommu.h> |
23 | #include <plat/iovmm.h> | 24 | #include <plat/iovmm.h> |
24 | 25 | ||
25 | #include "iopgtable.h" | 26 | #include <plat/iopgtable.h> |
26 | |||
27 | /* | ||
28 | * A device driver needs to create address mappings between: | ||
29 | * | ||
30 | * - iommu/device address | ||
31 | * - physical address | ||
32 | * - mpu virtual address | ||
33 | * | ||
34 | * There are 4 possible patterns for them: | ||
35 | * | ||
36 | * |iova/ mapping iommu_ page | ||
37 | * | da pa va (d)-(p)-(v) function type | ||
38 | * --------------------------------------------------------------------------- | ||
39 | * 1 | c c c 1 - 1 - 1 _kmap() / _kunmap() s | ||
40 | * 2 | c c,a c 1 - 1 - 1 _kmalloc()/ _kfree() s | ||
41 | * 3 | c d c 1 - n - 1 _vmap() / _vunmap() s | ||
42 | * 4 | c d,a c 1 - n - 1 _vmalloc()/ _vfree() n* | ||
43 | * | ||
44 | * | ||
45 | * 'iova': device iommu virtual address | ||
46 | * 'da': alias of 'iova' | ||
47 | * 'pa': physical address | ||
48 | * 'va': mpu virtual address | ||
49 | * | ||
50 | * 'c': contiguous memory area | ||
51 | * 'd': discontiguous memory area | ||
52 | * 'a': anonymous memory allocation | ||
53 | * '()': optional feature | ||
54 | * | ||
55 | * 'n': a normal page(4KB) size is used. | ||
56 | * 's': multiple iommu superpage(16MB, 1MB, 64KB, 4KB) size is used. | ||
57 | * | ||
58 | * '*': not yet, but feasible. | ||
59 | */ | ||
60 | 27 | ||
61 | static struct kmem_cache *iovm_area_cachep; | 28 | static struct kmem_cache *iovm_area_cachep; |
62 | 29 | ||
30 | /* return the offset of the first scatterlist entry in a sg table */ | ||
31 | static unsigned int sgtable_offset(const struct sg_table *sgt) | ||
32 | { | ||
33 | if (!sgt || !sgt->nents) | ||
34 | return 0; | ||
35 | |||
36 | return sgt->sgl->offset; | ||
37 | } | ||
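sgtable_offset() is what lets omap_iommu_vmap() accept a first scatterlist entry that does not start page-aligned: the mapping is created from the page base and the first-entry offset is added back to the returned da (and masked off again in omap_iommu_vunmap() via PAGE_MASK). A hedged illustration with a hypothetical one-entry table:

    /* first entry starts 0x100 bytes into its page; length + offset must
     * still be a valid iommu page size, per sgtable_len() below */
    sg_set_page(sgt->sgl, page, PAGE_SIZE - 0x100, 0x100);

    da = omap_iommu_vmap(domain, obj, 0, sgt, flags);
    /* da == <page-aligned mapping base> + 0x100 */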
38 | |||
63 | /* return total bytes of sg buffers */ | 39 | /* return total bytes of sg buffers */ |
64 | static size_t sgtable_len(const struct sg_table *sgt) | 40 | static size_t sgtable_len(const struct sg_table *sgt) |
65 | { | 41 | { |
@@ -72,11 +48,17 @@ static size_t sgtable_len(const struct sg_table *sgt) | |||
72 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | 48 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { |
73 | size_t bytes; | 49 | size_t bytes; |
74 | 50 | ||
75 | bytes = sg->length; | 51 | bytes = sg->length + sg->offset; |
76 | 52 | ||
77 | if (!iopgsz_ok(bytes)) { | 53 | if (!iopgsz_ok(bytes)) { |
78 | pr_err("%s: sg[%d] not iommu pagesize(%x)\n", | 54 | pr_err("%s: sg[%d] not iommu pagesize(%zu %u)\n", |
79 | __func__, i, bytes); | 55 | __func__, i, bytes, sg->offset); |
56 | return 0; | ||
57 | } | ||
58 | |||
59 | if (i && sg->offset) { | ||
60 | pr_err("%s: sg[%d] offset not allowed in internal " | ||
61 | "entries\n", __func__, i); | ||
80 | return 0; | 62 | return 0; |
81 | } | 63 | } |
82 | 64 | ||
@@ -197,8 +179,8 @@ static void *vmap_sg(const struct sg_table *sgt) | |||
197 | u32 pa; | 179 | u32 pa; |
198 | int err; | 180 | int err; |
199 | 181 | ||
200 | pa = sg_phys(sg); | 182 | pa = sg_phys(sg) - sg->offset; |
201 | bytes = sg->length; | 183 | bytes = sg->length + sg->offset; |
202 | 184 | ||
203 | BUG_ON(bytes != PAGE_SIZE); | 185 | BUG_ON(bytes != PAGE_SIZE); |
204 | 186 | ||
@@ -224,7 +206,8 @@ static inline void vunmap_sg(const void *va) | |||
224 | vunmap(va); | 206 | vunmap(va); |
225 | } | 207 | } |
226 | 208 | ||
227 | static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da) | 209 | static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj, |
210 | const u32 da) | ||
228 | { | 211 | { |
229 | struct iovm_struct *tmp; | 212 | struct iovm_struct *tmp; |
230 | 213 | ||
@@ -246,12 +229,12 @@ static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da) | |||
246 | } | 229 | } |
247 | 230 | ||
248 | /** | 231 | /** |
249 | * find_iovm_area - find iovma which includes @da | 232 | * omap_find_iovm_area - find iovma which includes @da |
250 | * @da: iommu device virtual address | 233 | * @da: iommu device virtual address |
251 | * | 234 | * |
252 | * Find the existing iovma starting at @da | 235 | * Find the existing iovma starting at @da |
253 | */ | 236 | */ |
254 | struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da) | 237 | struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da) |
255 | { | 238 | { |
256 | struct iovm_struct *area; | 239 | struct iovm_struct *area; |
257 | 240 | ||
@@ -261,13 +244,13 @@ struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da) | |||
261 | 244 | ||
262 | return area; | 245 | return area; |
263 | } | 246 | } |
264 | EXPORT_SYMBOL_GPL(find_iovm_area); | 247 | EXPORT_SYMBOL_GPL(omap_find_iovm_area); |
265 | 248 | ||
266 | /* | 249 | /* |
267 | * This finds the hole(area) which fits the requested address and len | 250 | * This finds the hole(area) which fits the requested address and len |
268 | * in iovmas mmap, and returns the new allocated iovma. | 251 | * in iovmas mmap, and returns the new allocated iovma. |
269 | */ | 252 | */ |
270 | static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da, | 253 | static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da, |
271 | size_t bytes, u32 flags) | 254 | size_t bytes, u32 flags) |
272 | { | 255 | { |
273 | struct iovm_struct *new, *tmp; | 256 | struct iovm_struct *new, *tmp; |
@@ -342,7 +325,7 @@ found: | |||
342 | return new; | 325 | return new; |
343 | } | 326 | } |
344 | 327 | ||
345 | static void free_iovm_area(struct iommu *obj, struct iovm_struct *area) | 328 | static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area) |
346 | { | 329 | { |
347 | size_t bytes; | 330 | size_t bytes; |
348 | 331 | ||
@@ -358,14 +341,14 @@ static void free_iovm_area(struct iommu *obj, struct iovm_struct *area) | |||
358 | } | 341 | } |
359 | 342 | ||
360 | /** | 343 | /** |
361 | * da_to_va - convert (d) to (v) | 344 | * omap_da_to_va - convert (d) to (v) |
362 | * @obj: objective iommu | 345 | * @obj: objective iommu |
363 | * @da: iommu device virtual address | 346 | * @da: iommu device virtual address |
364 | * @va: mpu virtual address | 347 | * @va: mpu virtual address |
365 | * | 348 | * |
366 | * Returns mpu virtual addr which corresponds to a given device virtual addr | 349 | * Returns mpu virtual addr which corresponds to a given device virtual addr |
367 | */ | 350 | */ |
368 | void *da_to_va(struct iommu *obj, u32 da) | 351 | void *omap_da_to_va(struct omap_iommu *obj, u32 da) |
369 | { | 352 | { |
370 | void *va = NULL; | 353 | void *va = NULL; |
371 | struct iovm_struct *area; | 354 | struct iovm_struct *area; |
@@ -383,7 +366,7 @@ out: | |||
383 | 366 | ||
384 | return va; | 367 | return va; |
385 | } | 368 | } |
386 | EXPORT_SYMBOL_GPL(da_to_va); | 369 | EXPORT_SYMBOL_GPL(omap_da_to_va); |
387 | 370 | ||
388 | static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va) | 371 | static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va) |
389 | { | 372 | { |
@@ -397,7 +380,7 @@ static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va) | |||
397 | const size_t bytes = PAGE_SIZE; | 380 | const size_t bytes = PAGE_SIZE; |
398 | 381 | ||
399 | /* | 382 | /* |
400 | * iommu 'superpage' isn't supported with 'iommu_vmalloc()' | 383 | * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()' |
401 | */ | 384 | */ |
402 | pg = vmalloc_to_page(va); | 385 | pg = vmalloc_to_page(va); |
403 | BUG_ON(!pg); | 386 | BUG_ON(!pg); |
@@ -418,74 +401,39 @@ static inline void sgtable_drain_vmalloc(struct sg_table *sgt) | |||
418 | BUG_ON(!sgt); | 401 | BUG_ON(!sgt); |
419 | } | 402 | } |
420 | 403 | ||
421 | static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da, | ||
422 | size_t len) | ||
423 | { | ||
424 | unsigned int i; | ||
425 | struct scatterlist *sg; | ||
426 | |||
427 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
428 | unsigned bytes; | ||
429 | |||
430 | bytes = max_alignment(da | pa); | ||
431 | bytes = min_t(unsigned, bytes, iopgsz_max(len)); | ||
432 | |||
433 | BUG_ON(!iopgsz_ok(bytes)); | ||
434 | |||
435 | sg_set_buf(sg, phys_to_virt(pa), bytes); | ||
436 | /* | ||
437 | * 'pa' is contiguous (linear). | ||
438 | */ | ||
439 | pa += bytes; | ||
440 | da += bytes; | ||
441 | len -= bytes; | ||
442 | } | ||
443 | BUG_ON(len); | ||
444 | } | ||
445 | |||
446 | static inline void sgtable_drain_kmalloc(struct sg_table *sgt) | ||
447 | { | ||
448 | /* | ||
449 | * Actually this is not necessary at all, just exists for | ||
450 | * consistency of the code readability | ||
451 | */ | ||
452 | BUG_ON(!sgt); | ||
453 | } | ||
454 | |||
455 | /* create 'da' <-> 'pa' mapping from 'sgt' */ | 404 | /* create 'da' <-> 'pa' mapping from 'sgt' */ |
456 | static int map_iovm_area(struct iommu *obj, struct iovm_struct *new, | 405 | static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new, |
457 | const struct sg_table *sgt, u32 flags) | 406 | const struct sg_table *sgt, u32 flags) |
458 | { | 407 | { |
459 | int err; | 408 | int err; |
460 | unsigned int i, j; | 409 | unsigned int i, j; |
461 | struct scatterlist *sg; | 410 | struct scatterlist *sg; |
462 | u32 da = new->da_start; | 411 | u32 da = new->da_start; |
412 | int order; | ||
463 | 413 | ||
464 | if (!obj || !sgt) | 414 | if (!domain || !sgt) |
465 | return -EINVAL; | 415 | return -EINVAL; |
466 | 416 | ||
467 | BUG_ON(!sgtable_ok(sgt)); | 417 | BUG_ON(!sgtable_ok(sgt)); |
468 | 418 | ||
469 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | 419 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { |
470 | u32 pa; | 420 | u32 pa; |
471 | int pgsz; | ||
472 | size_t bytes; | 421 | size_t bytes; |
473 | struct iotlb_entry e; | ||
474 | 422 | ||
475 | pa = sg_phys(sg); | 423 | pa = sg_phys(sg) - sg->offset; |
476 | bytes = sg->length; | 424 | bytes = sg->length + sg->offset; |
477 | 425 | ||
478 | flags &= ~IOVMF_PGSZ_MASK; | 426 | flags &= ~IOVMF_PGSZ_MASK; |
479 | pgsz = bytes_to_iopgsz(bytes); | 427 | |
480 | if (pgsz < 0) | 428 | if (bytes_to_iopgsz(bytes) < 0) |
481 | goto err_out; | 429 | goto err_out; |
482 | flags |= pgsz; | 430 | |
431 | order = get_order(bytes); | ||
483 | 432 | ||
484 | pr_debug("%s: [%d] %08x %08x(%x)\n", __func__, | 433 | pr_debug("%s: [%d] %08x %08x(%x)\n", __func__, |
485 | i, da, pa, bytes); | 434 | i, da, pa, bytes); |
486 | 435 | ||
487 | iotlb_init_entry(&e, da, pa, flags); | 436 | err = iommu_map(domain, da, pa, order, flags); |
488 | err = iopgtable_store_entry(obj, &e); | ||
489 | if (err) | 437 | if (err) |
490 | goto err_out; | 438 | goto err_out; |
491 | 439 | ||
@@ -499,9 +447,11 @@ err_out: | |||
499 | for_each_sg(sgt->sgl, sg, i, j) { | 447 | for_each_sg(sgt->sgl, sg, i, j) { |
500 | size_t bytes; | 448 | size_t bytes; |
501 | 449 | ||
502 | bytes = iopgtable_clear_entry(obj, da); | 450 | bytes = sg->length + sg->offset; |
451 | order = get_order(bytes); | ||
503 | 452 | ||
504 | BUG_ON(!iopgsz_ok(bytes)); | 453 | /* ignore failures.. we're already handling one */ |
454 | iommu_unmap(domain, da, order); | ||
505 | 455 | ||
506 | da += bytes; | 456 | da += bytes; |
507 | } | 457 | } |
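map_iovm_area() now derives a page order from each offset-adjusted segment length before calling iommu_map(). A hedged arithmetic sketch, assuming 4 KiB kernel pages and a segment size that is a supported iommu page size (4K/64K/1M/16M):

    size_t bytes = SZ_64K;
    int order = get_order(bytes);  /* 64K with 4K pages -> order 4 */

    err = iommu_map(domain, da, pa, order, flags);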
@@ -509,22 +459,31 @@ err_out: | |||
509 | } | 459 | } |
510 | 460 | ||
511 | /* release 'da' <-> 'pa' mapping */ | 461 | /* release 'da' <-> 'pa' mapping */ |
512 | static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area) | 462 | static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj, |
463 | struct iovm_struct *area) | ||
513 | { | 464 | { |
514 | u32 start; | 465 | u32 start; |
515 | size_t total = area->da_end - area->da_start; | 466 | size_t total = area->da_end - area->da_start; |
467 | const struct sg_table *sgt = area->sgt; | ||
468 | struct scatterlist *sg; | ||
469 | int i, err; | ||
516 | 470 | ||
471 | BUG_ON(!sgtable_ok(sgt)); | ||
517 | BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE)); | 472 | BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE)); |
518 | 473 | ||
519 | start = area->da_start; | 474 | start = area->da_start; |
520 | while (total > 0) { | 475 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { |
521 | size_t bytes; | 476 | size_t bytes; |
477 | int order; | ||
478 | |||
479 | bytes = sg->length + sg->offset; | ||
480 | order = get_order(bytes); | ||
481 | |||
482 | err = iommu_unmap(domain, start, order); | ||
483 | if (err < 0) | ||
484 | break; | ||
522 | 485 | ||
523 | bytes = iopgtable_clear_entry(obj, start); | 486 | dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n", |
524 | if (bytes == 0) | ||
525 | bytes = PAGE_SIZE; | ||
526 | else | ||
527 | dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n", | ||
528 | __func__, start, bytes, area->flags); | 487 | __func__, start, bytes, area->flags); |
529 | 488 | ||
530 | BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); | 489 | BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); |
@@ -536,7 +495,8 @@ static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area) | |||
536 | } | 495 | } |
537 | 496 | ||
538 | /* template function for all unmapping */ | 497 | /* template function for all unmapping */ |
539 | static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da, | 498 | static struct sg_table *unmap_vm_area(struct iommu_domain *domain, |
499 | struct omap_iommu *obj, const u32 da, | ||
540 | void (*fn)(const void *), u32 flags) | 500 | void (*fn)(const void *), u32 flags) |
541 | { | 501 | { |
542 | struct sg_table *sgt = NULL; | 502 | struct sg_table *sgt = NULL; |
@@ -562,7 +522,7 @@ static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da, | |||
562 | } | 522 | } |
563 | sgt = (struct sg_table *)area->sgt; | 523 | sgt = (struct sg_table *)area->sgt; |
564 | 524 | ||
565 | unmap_iovm_area(obj, area); | 525 | unmap_iovm_area(domain, obj, area); |
566 | 526 | ||
567 | fn(area->va); | 527 | fn(area->va); |
568 | 528 | ||
@@ -577,8 +537,9 @@ out: | |||
577 | return sgt; | 537 | return sgt; |
578 | } | 538 | } |
579 | 539 | ||
580 | static u32 map_iommu_region(struct iommu *obj, u32 da, | 540 | static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj, |
581 | const struct sg_table *sgt, void *va, size_t bytes, u32 flags) | 541 | u32 da, const struct sg_table *sgt, void *va, |
542 | size_t bytes, u32 flags) | ||
582 | { | 543 | { |
583 | int err = -ENOMEM; | 544 | int err = -ENOMEM; |
584 | struct iovm_struct *new; | 545 | struct iovm_struct *new; |
@@ -593,7 +554,7 @@ static u32 map_iommu_region(struct iommu *obj, u32 da, | |||
593 | new->va = va; | 554 | new->va = va; |
594 | new->sgt = sgt; | 555 | new->sgt = sgt; |
595 | 556 | ||
596 | if (map_iovm_area(obj, new, sgt, new->flags)) | 557 | if (map_iovm_area(domain, new, sgt, new->flags)) |
597 | goto err_map; | 558 | goto err_map; |
598 | 559 | ||
599 | mutex_unlock(&obj->mmap_lock); | 560 | mutex_unlock(&obj->mmap_lock); |
@@ -610,14 +571,16 @@ err_alloc_iovma: | |||
610 | return err; | 571 | return err; |
611 | } | 572 | } |
612 | 573 | ||
613 | static inline u32 __iommu_vmap(struct iommu *obj, u32 da, | 574 | static inline u32 |
614 | const struct sg_table *sgt, void *va, size_t bytes, u32 flags) | 575 | __iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, |
576 | u32 da, const struct sg_table *sgt, | ||
577 | void *va, size_t bytes, u32 flags) | ||
615 | { | 578 | { |
616 | return map_iommu_region(obj, da, sgt, va, bytes, flags); | 579 | return map_iommu_region(domain, obj, da, sgt, va, bytes, flags); |
617 | } | 580 | } |
618 | 581 | ||
619 | /** | 582 | /** |
620 | * iommu_vmap - (d)-(p)-(v) address mapper | 583 | * omap_iommu_vmap - (d)-(p)-(v) address mapper |
621 | * @obj: objective iommu | 584 | * @obj: objective iommu |
622 | * @sgt: address of scatter gather table | 585 | * @sgt: address of scatter gather table |
623 | * @flags: iovma and page property | 586 | * @flags: iovma and page property |
@@ -625,8 +588,8 @@ static inline u32 __iommu_vmap(struct iommu *obj, u32 da, | |||
625 | * Creates 1-n-1 mapping with given @sgt and returns @da. | 588 | * Creates 1-n-1 mapping with given @sgt and returns @da. |
626 | * All @sgt element must be io page size aligned. | 589 | * All @sgt element must be io page size aligned. |
627 | */ | 590 | */ |
628 | u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt, | 591 | u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da, |
629 | u32 flags) | 592 | const struct sg_table *sgt, u32 flags) |
630 | { | 593 | { |
631 | size_t bytes; | 594 | size_t bytes; |
632 | void *va = NULL; | 595 | void *va = NULL; |
@@ -648,38 +611,41 @@ u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt, | |||
648 | flags |= IOVMF_DISCONT; | 611 | flags |= IOVMF_DISCONT; |
649 | flags |= IOVMF_MMIO; | 612 | flags |= IOVMF_MMIO; |
650 | 613 | ||
651 | da = __iommu_vmap(obj, da, sgt, va, bytes, flags); | 614 | da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags); |
652 | if (IS_ERR_VALUE(da)) | 615 | if (IS_ERR_VALUE(da)) |
653 | vunmap_sg(va); | 616 | vunmap_sg(va); |
654 | 617 | ||
655 | return da; | 618 | return da + sgtable_offset(sgt); |
656 | } | 619 | } |
657 | EXPORT_SYMBOL_GPL(iommu_vmap); | 620 | EXPORT_SYMBOL_GPL(omap_iommu_vmap); |
658 | 621 | ||
659 | /** | 622 | /** |
660 | * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()' | 623 | * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()' |
661 | * @obj: objective iommu | 624 | * @obj: objective iommu |
662 | * @da: iommu device virtual address | 625 | * @da: iommu device virtual address |
663 | * | 626 | * |
664 | * Free the iommu virtually contiguous memory area starting at | 627 | * Free the iommu virtually contiguous memory area starting at |
665 | * @da, which was returned by 'iommu_vmap()'. | 628 | * @da, which was returned by 'omap_iommu_vmap()'. |
666 | */ | 629 | */ |
667 | struct sg_table *iommu_vunmap(struct iommu *obj, u32 da) | 630 | struct sg_table * |
631 | omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da) | ||
668 | { | 632 | { |
669 | struct sg_table *sgt; | 633 | struct sg_table *sgt; |
670 | /* | 634 | /* |
671 | * 'sgt' is allocated before 'iommu_vmalloc()' is called. | 635 | * 'sgt' is allocated before 'omap_iommu_vmalloc()' is called. |
672 | * Just returns 'sgt' to the caller to free | 636 | * Just returns 'sgt' to the caller to free |
673 | */ | 637 | */ |
674 | sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO); | 638 | da &= PAGE_MASK; |
639 | sgt = unmap_vm_area(domain, obj, da, vunmap_sg, | ||
640 | IOVMF_DISCONT | IOVMF_MMIO); | ||
675 | if (!sgt) | 641 | if (!sgt) |
676 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | 642 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); |
677 | return sgt; | 643 | return sgt; |
678 | } | 644 | } |
679 | EXPORT_SYMBOL_GPL(iommu_vunmap); | 645 | EXPORT_SYMBOL_GPL(omap_iommu_vunmap); |
680 | 646 | ||
681 | /** | 647 | /** |
682 | * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper | 648 | * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper |
683 | * @obj: objective iommu | 649 | * @obj: objective iommu |
684 | * @da: contiguous iommu virtual memory | 650 | * @da: contiguous iommu virtual memory |
685 | * @bytes: allocation size | 651 | * @bytes: allocation size |
@@ -688,7 +654,9 @@ EXPORT_SYMBOL_GPL(iommu_vunmap); | |||
688 | * Allocate @bytes linearly and creates 1-n-1 mapping and returns | 654 | * Allocate @bytes linearly and creates 1-n-1 mapping and returns |
689 | * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set. | 655 | * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set. |
690 | */ | 656 | */ |
691 | u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags) | 657 | u32 |
658 | omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da, | ||
659 | size_t bytes, u32 flags) | ||
692 | { | 660 | { |
693 | void *va; | 661 | void *va; |
694 | struct sg_table *sgt; | 662 | struct sg_table *sgt; |
@@ -712,7 +680,7 @@ u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags) | |||
712 | } | 680 | } |
713 | sgtable_fill_vmalloc(sgt, va); | 681 | sgtable_fill_vmalloc(sgt, va); |
714 | 682 | ||
715 | da = __iommu_vmap(obj, da, sgt, va, bytes, flags); | 683 | da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags); |
716 | if (IS_ERR_VALUE(da)) | 684 | if (IS_ERR_VALUE(da)) |
717 | goto err_iommu_vmap; | 685 | goto err_iommu_vmap; |
718 | 686 | ||
@@ -725,158 +693,28 @@ err_sgt_alloc: | |||
725 | vfree(va); | 693 | vfree(va); |
726 | return da; | 694 | return da; |
727 | } | 695 | } |
728 | EXPORT_SYMBOL_GPL(iommu_vmalloc); | 696 | EXPORT_SYMBOL_GPL(omap_iommu_vmalloc); |
729 | 697 | ||
730 | /** | 698 | /** |
731 | * iommu_vfree - release memory allocated by 'iommu_vmalloc()' | 699 | * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()' |
732 | * @obj: objective iommu | 700 | * @obj: objective iommu |
733 | * @da: iommu device virtual address | 701 | * @da: iommu device virtual address |
734 | * | 702 | * |
735 | * Frees the iommu virtually contiguous memory area starting at | 703 | * Frees the iommu virtually contiguous memory area starting at |
736 | * @da, as obtained from 'iommu_vmalloc()'. | 704 | * @da, as obtained from 'omap_iommu_vmalloc()'. |
737 | */ | 705 | */ |
738 | void iommu_vfree(struct iommu *obj, const u32 da) | 706 | void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj, |
707 | const u32 da) | ||
739 | { | 708 | { |
740 | struct sg_table *sgt; | 709 | struct sg_table *sgt; |
741 | 710 | ||
742 | sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC); | 711 | sgt = unmap_vm_area(domain, obj, da, vfree, |
712 | IOVMF_DISCONT | IOVMF_ALLOC); | ||
743 | if (!sgt) | 713 | if (!sgt) |
744 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | 714 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); |
745 | sgtable_free(sgt); | 715 | sgtable_free(sgt); |
746 | } | 716 | } |
747 | EXPORT_SYMBOL_GPL(iommu_vfree); | 717 | EXPORT_SYMBOL_GPL(omap_iommu_vfree); |
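A hedged usage pair for the renamed allocator, roughly how omap3isp consumes it (the IOVMF_* property bits and the 1 MiB size are illustrative assumptions):

    u32 da, flags = IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8;

    da = omap_iommu_vmalloc(isp->domain, isp->iommu, 0, SZ_1M, flags);
    if (IS_ERR_VALUE(da))
            return -ENOMEM;
    /* ... DMA through the mapping ... */
    omap_iommu_vfree(isp->domain, isp->iommu, da);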
748 | |||
749 | static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va, | ||
750 | size_t bytes, u32 flags) | ||
751 | { | ||
752 | struct sg_table *sgt; | ||
753 | |||
754 | sgt = sgtable_alloc(bytes, flags, da, pa); | ||
755 | if (IS_ERR(sgt)) | ||
756 | return PTR_ERR(sgt); | ||
757 | |||
758 | sgtable_fill_kmalloc(sgt, pa, da, bytes); | ||
759 | |||
760 | da = map_iommu_region(obj, da, sgt, va, bytes, flags); | ||
761 | if (IS_ERR_VALUE(da)) { | ||
762 | sgtable_drain_kmalloc(sgt); | ||
763 | sgtable_free(sgt); | ||
764 | } | ||
765 | |||
766 | return da; | ||
767 | } | ||
768 | |||
769 | /** | ||
770 | * iommu_kmap - (d)-(p)-(v) address mapper | ||
771 | * @obj: objective iommu | ||
772 | * @da: contiguous iommu virtual memory | ||
773 | * @pa: contiguous physical memory | ||
774 | * @flags: iovma and page property | ||
775 | * | ||
776 | * Creates 1-1-1 mapping and returns @da again, which can be | ||
777 | * adjusted if 'IOVMF_DA_FIXED' is not set. | ||
778 | */ | ||
779 | u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes, | ||
780 | u32 flags) | ||
781 | { | ||
782 | void *va; | ||
783 | |||
784 | if (!obj || !obj->dev || !bytes) | ||
785 | return -EINVAL; | ||
786 | |||
787 | bytes = PAGE_ALIGN(bytes); | ||
788 | |||
789 | va = ioremap(pa, bytes); | ||
790 | if (!va) | ||
791 | return -ENOMEM; | ||
792 | |||
793 | flags |= IOVMF_LINEAR; | ||
794 | flags |= IOVMF_MMIO; | ||
795 | |||
796 | da = __iommu_kmap(obj, da, pa, va, bytes, flags); | ||
797 | if (IS_ERR_VALUE(da)) | ||
798 | iounmap(va); | ||
799 | |||
800 | return da; | ||
801 | } | ||
802 | EXPORT_SYMBOL_GPL(iommu_kmap); | ||
803 | |||
804 | /** | ||
805 | * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()' | ||
806 | * @obj: objective iommu | ||
807 | * @da: iommu device virtual address | ||
808 | * | ||
809 | * Frees the iommu virtually contiguous memory area starting at | ||
810 | * @da, which was passed to and was returned by 'iommu_kmap()'. | ||
811 | */ | ||
812 | void iommu_kunmap(struct iommu *obj, u32 da) | ||
813 | { | ||
814 | struct sg_table *sgt; | ||
815 | typedef void (*func_t)(const void *); | ||
816 | |||
817 | sgt = unmap_vm_area(obj, da, (func_t)iounmap, | ||
818 | IOVMF_LINEAR | IOVMF_MMIO); | ||
819 | if (!sgt) | ||
820 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | ||
821 | sgtable_free(sgt); | ||
822 | } | ||
823 | EXPORT_SYMBOL_GPL(iommu_kunmap); | ||
824 | |||
825 | /** | ||
826 | * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper | ||
827 | * @obj: objective iommu | ||
828 | * @da: contiguous iommu virtual memory | ||
829 | * @bytes: bytes for allocation | ||
830 | * @flags: iovma and page property | ||
831 | * | ||
832 | * Allocate @bytes linearly and creates 1-1-1 mapping and returns | ||
833 | * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set. | ||
834 | */ | ||
835 | u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags) | ||
836 | { | ||
837 | void *va; | ||
838 | u32 pa; | ||
839 | |||
840 | if (!obj || !obj->dev || !bytes) | ||
841 | return -EINVAL; | ||
842 | |||
843 | bytes = PAGE_ALIGN(bytes); | ||
844 | |||
845 | va = kmalloc(bytes, GFP_KERNEL | GFP_DMA); | ||
846 | if (!va) | ||
847 | return -ENOMEM; | ||
848 | pa = virt_to_phys(va); | ||
849 | |||
850 | flags |= IOVMF_LINEAR; | ||
851 | flags |= IOVMF_ALLOC; | ||
852 | |||
853 | da = __iommu_kmap(obj, da, pa, va, bytes, flags); | ||
854 | if (IS_ERR_VALUE(da)) | ||
855 | kfree(va); | ||
856 | |||
857 | return da; | ||
858 | } | ||
859 | EXPORT_SYMBOL_GPL(iommu_kmalloc); | ||
860 | |||
861 | /** | ||
862 | * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()' | ||
863 | * @obj: objective iommu | ||
864 | * @da: iommu device virtual address | ||
865 | * | ||
866 | * Frees the iommu virtually contiguous memory area starting at | ||
867 | * @da, which was passed to and was returned by 'iommu_kmalloc()'. | ||
868 | */ | ||
869 | void iommu_kfree(struct iommu *obj, u32 da) | ||
870 | { | ||
871 | struct sg_table *sgt; | ||
872 | |||
873 | sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC); | ||
874 | if (!sgt) | ||
875 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | ||
876 | sgtable_free(sgt); | ||
877 | } | ||
878 | EXPORT_SYMBOL_GPL(iommu_kfree); | ||
879 | |||
880 | 718 | ||
881 | static int __init iovmm_init(void) | 719 | static int __init iovmm_init(void) |
882 | { | 720 | { |
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index f574dc012cad..620106937ec6 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig | |||
@@ -763,8 +763,7 @@ source "drivers/media/video/m5mols/Kconfig" | |||
763 | 763 | ||
764 | config VIDEO_OMAP3 | 764 | config VIDEO_OMAP3 |
765 | tristate "OMAP 3 Camera support (EXPERIMENTAL)" | 765 | tristate "OMAP 3 Camera support (EXPERIMENTAL)" |
766 | select OMAP_IOMMU | 766 | depends on OMAP_IOVMM && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3 && EXPERIMENTAL |
767 | depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3 && EXPERIMENTAL | ||
768 | ---help--- | 767 | ---help--- |
769 | Driver for an OMAP 3 camera controller. | 768 | Driver for an OMAP 3 camera controller. |
770 | 769 | ||
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c index 5cea2bbd7014..a7ed98596883 100644 --- a/drivers/media/video/omap3isp/isp.c +++ b/drivers/media/video/omap3isp/isp.c | |||
@@ -80,6 +80,13 @@ | |||
80 | #include "isph3a.h" | 80 | #include "isph3a.h" |
81 | #include "isphist.h" | 81 | #include "isphist.h" |
82 | 82 | ||
83 | /* | ||
84 | * This is provided as an interim solution until omap3isp no longer | ||
85 | * needs any omap-specific iommu API. | ||
86 | */ | ||
87 | #define to_iommu(dev) \ | ||
88 | (struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)) | ||
89 | |||
83 | static unsigned int autoidle; | 90 | static unsigned int autoidle; |
84 | module_param(autoidle, int, 0444); | 91 | module_param(autoidle, int, 0444); |
85 | MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support"); | 92 | MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support"); |
@@ -1108,7 +1115,7 @@ static void isp_save_ctx(struct isp_device *isp) | |||
1108 | { | 1115 | { |
1109 | isp_save_context(isp, isp_reg_list); | 1116 | isp_save_context(isp, isp_reg_list); |
1110 | if (isp->iommu) | 1117 | if (isp->iommu) |
1111 | iommu_save_ctx(isp->iommu); | 1118 | omap_iommu_save_ctx(isp->iommu); |
1112 | } | 1119 | } |
1113 | 1120 | ||
1114 | /* | 1121 | /* |
@@ -1122,7 +1129,7 @@ static void isp_restore_ctx(struct isp_device *isp) | |||
1122 | { | 1129 | { |
1123 | isp_restore_context(isp, isp_reg_list); | 1130 | isp_restore_context(isp, isp_reg_list); |
1124 | if (isp->iommu) | 1131 | if (isp->iommu) |
1125 | iommu_restore_ctx(isp->iommu); | 1132 | omap_iommu_restore_ctx(isp->iommu); |
1126 | omap3isp_ccdc_restore_context(isp); | 1133 | omap3isp_ccdc_restore_context(isp); |
1127 | omap3isp_preview_restore_context(isp); | 1134 | omap3isp_preview_restore_context(isp); |
1128 | } | 1135 | } |
@@ -1975,7 +1982,8 @@ static int isp_remove(struct platform_device *pdev) | |||
1975 | isp_cleanup_modules(isp); | 1982 | isp_cleanup_modules(isp); |
1976 | 1983 | ||
1977 | omap3isp_get(isp); | 1984 | omap3isp_get(isp); |
1978 | iommu_put(isp->iommu); | 1985 | iommu_detach_device(isp->domain, isp->iommu_dev); |
1986 | iommu_domain_free(isp->domain); | ||
1979 | omap3isp_put(isp); | 1987 | omap3isp_put(isp); |
1980 | 1988 | ||
1981 | free_irq(isp->irq_num, isp); | 1989 | free_irq(isp->irq_num, isp); |
@@ -2123,25 +2131,41 @@ static int isp_probe(struct platform_device *pdev) | |||
2123 | } | 2131 | } |
2124 | 2132 | ||
2125 | /* IOMMU */ | 2133 | /* IOMMU */ |
2126 | isp->iommu = iommu_get("isp"); | 2134 | isp->iommu_dev = omap_find_iommu_device("isp"); |
2127 | if (IS_ERR_OR_NULL(isp->iommu)) { | 2135 | if (!isp->iommu_dev) { |
2128 | isp->iommu = NULL; | 2136 | dev_err(isp->dev, "omap_find_iommu_device failed\n"); |
2129 | ret = -ENODEV; | 2137 | ret = -ENODEV; |
2130 | goto error_isp; | 2138 | goto error_isp; |
2131 | } | 2139 | } |
2132 | 2140 | ||
2141 | /* to be removed once iommu migration is complete */ | ||
2142 | isp->iommu = to_iommu(isp->iommu_dev); | ||
2143 | |||
2144 | isp->domain = iommu_domain_alloc(pdev->dev.bus); | ||
2145 | if (!isp->domain) { | ||
2146 | dev_err(isp->dev, "can't alloc iommu domain\n"); | ||
2147 | ret = -ENOMEM; | ||
2148 | goto error_isp; | ||
2149 | } | ||
2150 | |||
2151 | ret = iommu_attach_device(isp->domain, isp->iommu_dev); | ||
2152 | if (ret) { | ||
2153 | dev_err(&pdev->dev, "can't attach iommu device: %d\n", ret); | ||
2154 | goto free_domain; | ||
2155 | } | ||
2156 | |||
2133 | /* Interrupt */ | 2157 | /* Interrupt */ |
2134 | isp->irq_num = platform_get_irq(pdev, 0); | 2158 | isp->irq_num = platform_get_irq(pdev, 0); |
2135 | if (isp->irq_num <= 0) { | 2159 | if (isp->irq_num <= 0) { |
2136 | dev_err(isp->dev, "No IRQ resource\n"); | 2160 | dev_err(isp->dev, "No IRQ resource\n"); |
2137 | ret = -ENODEV; | 2161 | ret = -ENODEV; |
2138 | goto error_isp; | 2162 | goto detach_dev; |
2139 | } | 2163 | } |
2140 | 2164 | ||
2141 | if (request_irq(isp->irq_num, isp_isr, IRQF_SHARED, "OMAP3 ISP", isp)) { | 2165 | if (request_irq(isp->irq_num, isp_isr, IRQF_SHARED, "OMAP3 ISP", isp)) { |
2142 | dev_err(isp->dev, "Unable to request IRQ\n"); | 2166 | dev_err(isp->dev, "Unable to request IRQ\n"); |
2143 | ret = -EINVAL; | 2167 | ret = -EINVAL; |
2144 | goto error_isp; | 2168 | goto detach_dev; |
2145 | } | 2169 | } |
2146 | 2170 | ||
2147 | /* Entities */ | 2171 | /* Entities */ |
@@ -2162,8 +2186,11 @@ error_modules: | |||
2162 | isp_cleanup_modules(isp); | 2186 | isp_cleanup_modules(isp); |
2163 | error_irq: | 2187 | error_irq: |
2164 | free_irq(isp->irq_num, isp); | 2188 | free_irq(isp->irq_num, isp); |
2189 | detach_dev: | ||
2190 | iommu_detach_device(isp->domain, isp->iommu_dev); | ||
2191 | free_domain: | ||
2192 | iommu_domain_free(isp->domain); | ||
2165 | error_isp: | 2193 | error_isp: |
2166 | iommu_put(isp->iommu); | ||
2167 | omap3isp_put(isp); | 2194 | omap3isp_put(isp); |
2168 | error: | 2195 | error: |
2169 | isp_put_clocks(isp); | 2196 | isp_put_clocks(isp); |
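
Stripped of the ISP specifics, the new probe sequence reduces to the generic pattern below (a sketch following the hunks above; pdev is the probe's platform device, error unwinding abbreviated):

	struct device *dev = omap_find_iommu_device("isp");
	struct iommu_domain *domain;
	int ret;

	if (!dev)
		return -ENODEV;

	domain = iommu_domain_alloc(pdev->dev.bus);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret) {
		iommu_domain_free(domain);
		return ret;
	}
	/* teardown mirrors this: iommu_detach_device(), then iommu_domain_free() */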
diff --git a/drivers/media/video/omap3isp/isp.h b/drivers/media/video/omap3isp/isp.h index 529e582ef948..81fdd85deb60 100644 --- a/drivers/media/video/omap3isp/isp.h +++ b/drivers/media/video/omap3isp/isp.h | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/io.h> | 32 | #include <linux/io.h> |
33 | #include <linux/platform_device.h> | 33 | #include <linux/platform_device.h> |
34 | #include <linux/wait.h> | 34 | #include <linux/wait.h> |
35 | #include <linux/iommu.h> | ||
35 | #include <plat/iommu.h> | 36 | #include <plat/iommu.h> |
36 | #include <plat/iovmm.h> | 37 | #include <plat/iovmm.h> |
37 | 38 | ||
@@ -294,7 +295,9 @@ struct isp_device { | |||
294 | unsigned int sbl_resources; | 295 | unsigned int sbl_resources; |
295 | unsigned int subclk_resources; | 296 | unsigned int subclk_resources; |
296 | 297 | ||
297 | struct iommu *iommu; | 298 | struct omap_iommu *iommu; |
299 | struct iommu_domain *domain; | ||
300 | struct device *iommu_dev; | ||
298 | 301 | ||
299 | struct isp_platform_callback platform_cb; | 302 | struct isp_platform_callback platform_cb; |
300 | }; | 303 | }; |
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c index 80796eb0c53e..892671922f8a 100644 --- a/drivers/media/video/omap3isp/ispccdc.c +++ b/drivers/media/video/omap3isp/ispccdc.c | |||
@@ -366,7 +366,7 @@ static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc, | |||
366 | dma_unmap_sg(isp->dev, req->iovm->sgt->sgl, | 366 | dma_unmap_sg(isp->dev, req->iovm->sgt->sgl, |
367 | req->iovm->sgt->nents, DMA_TO_DEVICE); | 367 | req->iovm->sgt->nents, DMA_TO_DEVICE); |
368 | if (req->table) | 368 | if (req->table) |
369 | iommu_vfree(isp->iommu, req->table); | 369 | omap_iommu_vfree(isp->domain, isp->iommu, req->table); |
370 | kfree(req); | 370 | kfree(req); |
371 | } | 371 | } |
372 | 372 | ||
@@ -438,15 +438,15 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc, | |||
438 | 438 | ||
439 | req->enable = 1; | 439 | req->enable = 1; |
440 | 440 | ||
441 | req->table = iommu_vmalloc(isp->iommu, 0, req->config.size, | 441 | req->table = omap_iommu_vmalloc(isp->domain, isp->iommu, 0, |
442 | IOMMU_FLAG); | 442 | req->config.size, IOMMU_FLAG); |
443 | if (IS_ERR_VALUE(req->table)) { | 443 | if (IS_ERR_VALUE(req->table)) { |
444 | req->table = 0; | 444 | req->table = 0; |
445 | ret = -ENOMEM; | 445 | ret = -ENOMEM; |
446 | goto done; | 446 | goto done; |
447 | } | 447 | } |
448 | 448 | ||
449 | req->iovm = find_iovm_area(isp->iommu, req->table); | 449 | req->iovm = omap_find_iovm_area(isp->iommu, req->table); |
450 | if (req->iovm == NULL) { | 450 | if (req->iovm == NULL) { |
451 | ret = -ENOMEM; | 451 | ret = -ENOMEM; |
452 | goto done; | 452 | goto done; |
@@ -462,7 +462,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc, | |||
462 | dma_sync_sg_for_cpu(isp->dev, req->iovm->sgt->sgl, | 462 | dma_sync_sg_for_cpu(isp->dev, req->iovm->sgt->sgl, |
463 | req->iovm->sgt->nents, DMA_TO_DEVICE); | 463 | req->iovm->sgt->nents, DMA_TO_DEVICE); |
464 | 464 | ||
465 | table = da_to_va(isp->iommu, req->table); | 465 | table = omap_da_to_va(isp->iommu, req->table); |
466 | if (copy_from_user(table, config->lsc, req->config.size)) { | 466 | if (copy_from_user(table, config->lsc, req->config.size)) { |
467 | ret = -EFAULT; | 467 | ret = -EFAULT; |
468 | goto done; | 468 | goto done; |
@@ -731,18 +731,19 @@ static int ccdc_config(struct isp_ccdc_device *ccdc, | |||
731 | 731 | ||
732 | /* | 732 | /* |
733 | * table_new must be 64-bytes aligned, but it's | 733 | * table_new must be 64-bytes aligned, but it's |
734 | * already done by iommu_vmalloc(). | 734 | * already done by omap_iommu_vmalloc(). |
735 | */ | 735 | */ |
736 | size = ccdc->fpc.fpnum * 4; | 736 | size = ccdc->fpc.fpnum * 4; |
737 | table_new = iommu_vmalloc(isp->iommu, 0, size, | 737 | table_new = omap_iommu_vmalloc(isp->domain, isp->iommu, |
738 | IOMMU_FLAG); | 738 | 0, size, IOMMU_FLAG); |
739 | if (IS_ERR_VALUE(table_new)) | 739 | if (IS_ERR_VALUE(table_new)) |
740 | return -ENOMEM; | 740 | return -ENOMEM; |
741 | 741 | ||
742 | if (copy_from_user(da_to_va(isp->iommu, table_new), | 742 | if (copy_from_user(omap_da_to_va(isp->iommu, table_new), |
743 | (__force void __user *) | 743 | (__force void __user *) |
744 | ccdc->fpc.fpcaddr, size)) { | 744 | ccdc->fpc.fpcaddr, size)) { |
745 | iommu_vfree(isp->iommu, table_new); | 745 | omap_iommu_vfree(isp->domain, isp->iommu, |
746 | table_new); | ||
746 | return -EFAULT; | 747 | return -EFAULT; |
747 | } | 748 | } |
748 | 749 | ||
@@ -752,7 +753,7 @@ static int ccdc_config(struct isp_ccdc_device *ccdc, | |||
752 | 753 | ||
753 | ccdc_configure_fpc(ccdc); | 754 | ccdc_configure_fpc(ccdc); |
754 | if (table_old != 0) | 755 | if (table_old != 0) |
755 | iommu_vfree(isp->iommu, table_old); | 756 | omap_iommu_vfree(isp->domain, isp->iommu, table_old); |
756 | } | 757 | } |
757 | 758 | ||
758 | return ccdc_lsc_config(ccdc, ccdc_struct); | 759 | return ccdc_lsc_config(ccdc, ccdc_struct); |
@@ -2287,5 +2288,5 @@ void omap3isp_ccdc_cleanup(struct isp_device *isp) | |||
2287 | ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue); | 2288 | ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue); |
2288 | 2289 | ||
2289 | if (ccdc->fpc.fpcaddr != 0) | 2290 | if (ccdc->fpc.fpcaddr != 0) |
2290 | iommu_vfree(isp->iommu, ccdc->fpc.fpcaddr); | 2291 | omap_iommu_vfree(isp->domain, isp->iommu, ccdc->fpc.fpcaddr); |
2291 | } | 2292 | } |
diff --git a/drivers/media/video/omap3isp/ispstat.c b/drivers/media/video/omap3isp/ispstat.c index 808065948ac1..732905552261 100644 --- a/drivers/media/video/omap3isp/ispstat.c +++ b/drivers/media/video/omap3isp/ispstat.c | |||
@@ -366,7 +366,8 @@ static void isp_stat_bufs_free(struct ispstat *stat) | |||
366 | dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl, | 366 | dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl, |
367 | buf->iovm->sgt->nents, | 367 | buf->iovm->sgt->nents, |
368 | DMA_FROM_DEVICE); | 368 | DMA_FROM_DEVICE); |
369 | iommu_vfree(isp->iommu, buf->iommu_addr); | 369 | omap_iommu_vfree(isp->domain, isp->iommu, |
370 | buf->iommu_addr); | ||
370 | } else { | 371 | } else { |
371 | if (!buf->virt_addr) | 372 | if (!buf->virt_addr) |
372 | continue; | 373 | continue; |
@@ -399,8 +400,8 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size) | |||
399 | struct iovm_struct *iovm; | 400 | struct iovm_struct *iovm; |
400 | 401 | ||
401 | WARN_ON(buf->dma_addr); | 402 | WARN_ON(buf->dma_addr); |
402 | buf->iommu_addr = iommu_vmalloc(isp->iommu, 0, size, | 403 | buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->iommu, 0, |
403 | IOMMU_FLAG); | 404 | size, IOMMU_FLAG); |
404 | if (IS_ERR((void *)buf->iommu_addr)) { | 405 | if (IS_ERR((void *)buf->iommu_addr)) { |
405 | dev_err(stat->isp->dev, | 406 | dev_err(stat->isp->dev, |
406 | "%s: Can't acquire memory for " | 407 | "%s: Can't acquire memory for " |
@@ -409,7 +410,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size) | |||
409 | return -ENOMEM; | 410 | return -ENOMEM; |
410 | } | 411 | } |
411 | 412 | ||
412 | iovm = find_iovm_area(isp->iommu, buf->iommu_addr); | 413 | iovm = omap_find_iovm_area(isp->iommu, buf->iommu_addr); |
413 | if (!iovm || | 414 | if (!iovm || |
414 | !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents, | 415 | !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents, |
415 | DMA_FROM_DEVICE)) { | 416 | DMA_FROM_DEVICE)) { |
@@ -418,7 +419,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size) | |||
418 | } | 419 | } |
419 | buf->iovm = iovm; | 420 | buf->iovm = iovm; |
420 | 421 | ||
421 | buf->virt_addr = da_to_va(stat->isp->iommu, | 422 | buf->virt_addr = omap_da_to_va(stat->isp->iommu, |
422 | (u32)buf->iommu_addr); | 423 | (u32)buf->iommu_addr); |
423 | buf->empty = 1; | 424 | buf->empty = 1; |
424 | dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated." | 425 | dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated." |
diff --git a/drivers/media/video/omap3isp/ispvideo.c b/drivers/media/video/omap3isp/ispvideo.c index fd965adfd597..912ac071b104 100644 --- a/drivers/media/video/omap3isp/ispvideo.c +++ b/drivers/media/video/omap3isp/ispvideo.c | |||
@@ -446,7 +446,7 @@ ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen) | |||
446 | sgt->nents = sglen; | 446 | sgt->nents = sglen; |
447 | sgt->orig_nents = sglen; | 447 | sgt->orig_nents = sglen; |
448 | 448 | ||
449 | da = iommu_vmap(isp->iommu, 0, sgt, IOMMU_FLAG); | 449 | da = omap_iommu_vmap(isp->domain, isp->iommu, 0, sgt, IOMMU_FLAG); |
450 | if (IS_ERR_VALUE(da)) | 450 | if (IS_ERR_VALUE(da)) |
451 | kfree(sgt); | 451 | kfree(sgt); |
452 | 452 | ||
@@ -462,7 +462,7 @@ static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da) | |||
462 | { | 462 | { |
463 | struct sg_table *sgt; | 463 | struct sg_table *sgt; |
464 | 464 | ||
465 | sgt = iommu_vunmap(isp->iommu, (u32)da); | 465 | sgt = omap_iommu_vunmap(isp->domain, isp->iommu, (u32)da); |
466 | kfree(sgt); | 466 | kfree(sgt); |
467 | } | 467 | } |
468 | 468 | ||
diff --git a/include/linux/device.h b/include/linux/device.h index bdcf361ca938..85e78fc7d7fd 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -33,6 +33,7 @@ struct class; | |||
33 | struct subsys_private; | 33 | struct subsys_private; |
34 | struct bus_type; | 34 | struct bus_type; |
35 | struct device_node; | 35 | struct device_node; |
36 | struct iommu_ops; | ||
36 | 37 | ||
37 | struct bus_attribute { | 38 | struct bus_attribute { |
38 | struct attribute attr; | 39 | struct attribute attr; |
@@ -67,6 +68,9 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *); | |||
67 | * @resume: Called to bring a device on this bus out of sleep mode. | 68 | * @resume: Called to bring a device on this bus out of sleep mode. |
68 | * @pm: Power management operations of this bus, callback the specific | 69 | * @pm: Power management operations of this bus, callback the specific |
69 | * device driver's pm-ops. | 70 | * device driver's pm-ops. |
71 | * @iommu_ops: IOMMU-specific operations for this bus, used to attach IOMMU | ||
72 | * driver implementations to a bus and allow the driver to do | ||
73 | * bus-specific setup. | ||
70 | * @p: The private data of the driver core, only the driver core can | 74 | * @p: The private data of the driver core, only the driver core can |
71 | * touch this. | 75 | * touch this. |
72 | * | 76 | * |
@@ -96,6 +100,8 @@ struct bus_type { | |||
96 | 100 | ||
97 | const struct dev_pm_ops *pm; | 101 | const struct dev_pm_ops *pm; |
98 | 102 | ||
103 | struct iommu_ops *iommu_ops; | ||
104 | |||
99 | struct subsys_private *p; | 105 | struct subsys_private *p; |
100 | }; | 106 | }; |
101 | 107 | ||
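
With the @iommu_ops member in place, an IOMMU driver registers its operations per bus rather than globally; a minimal sketch (my_iommu_ops and its callbacks are placeholders, not part of this patch):

	static int my_domain_init(struct iommu_domain *domain)
	{
		return 0;	/* placeholder */
	}

	static struct iommu_ops my_iommu_ops = {
		.domain_init	= my_domain_init,
		/* ... remaining callbacks ... */
	};

	static int __init my_iommu_driver_init(void)
	{
		/* replaces the old global register_iommu(&my_iommu_ops) */
		return bus_set_iommu(&platform_bus_type, &my_iommu_ops);
	}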
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 9940319d6f9d..432acc4c054d 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
@@ -25,15 +25,29 @@ | |||
25 | #define IOMMU_WRITE (2) | 25 | #define IOMMU_WRITE (2) |
26 | #define IOMMU_CACHE (4) /* DMA cache coherency */ | 26 | #define IOMMU_CACHE (4) /* DMA cache coherency */ |
27 | 27 | ||
28 | struct iommu_ops; | ||
29 | struct bus_type; | ||
28 | struct device; | 30 | struct device; |
31 | struct iommu_domain; | ||
32 | |||
33 | /* iommu fault flags */ | ||
34 | #define IOMMU_FAULT_READ 0x0 | ||
35 | #define IOMMU_FAULT_WRITE 0x1 | ||
36 | |||
37 | typedef int (*iommu_fault_handler_t)(struct iommu_domain *, | ||
38 | struct device *, unsigned long, int); | ||
29 | 39 | ||
30 | struct iommu_domain { | 40 | struct iommu_domain { |
41 | struct iommu_ops *ops; | ||
31 | void *priv; | 42 | void *priv; |
43 | iommu_fault_handler_t handler; | ||
32 | }; | 44 | }; |
33 | 45 | ||
34 | #define IOMMU_CAP_CACHE_COHERENCY 0x1 | 46 | #define IOMMU_CAP_CACHE_COHERENCY 0x1 |
35 | #define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */ | 47 | #define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */ |
36 | 48 | ||
49 | #ifdef CONFIG_IOMMU_API | ||
50 | |||
37 | struct iommu_ops { | 51 | struct iommu_ops { |
38 | int (*domain_init)(struct iommu_domain *domain); | 52 | int (*domain_init)(struct iommu_domain *domain); |
39 | void (*domain_destroy)(struct iommu_domain *domain); | 53 | void (*domain_destroy)(struct iommu_domain *domain); |
@@ -49,11 +63,9 @@ struct iommu_ops { | |||
49 | unsigned long cap); | 63 | unsigned long cap); |
50 | }; | 64 | }; |
51 | 65 | ||
52 | #ifdef CONFIG_IOMMU_API | 66 | extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops); |
53 | 67 | extern bool iommu_present(struct bus_type *bus); | |
54 | extern void register_iommu(struct iommu_ops *ops); | 68 | extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus); |
55 | extern bool iommu_found(void); | ||
56 | extern struct iommu_domain *iommu_domain_alloc(void); | ||
57 | extern void iommu_domain_free(struct iommu_domain *domain); | 69 | extern void iommu_domain_free(struct iommu_domain *domain); |
58 | extern int iommu_attach_device(struct iommu_domain *domain, | 70 | extern int iommu_attach_device(struct iommu_domain *domain, |
59 | struct device *dev); | 71 | struct device *dev); |
@@ -67,19 +79,58 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, | |||
67 | unsigned long iova); | 79 | unsigned long iova); |
68 | extern int iommu_domain_has_cap(struct iommu_domain *domain, | 80 | extern int iommu_domain_has_cap(struct iommu_domain *domain, |
69 | unsigned long cap); | 81 | unsigned long cap); |
82 | extern void iommu_set_fault_handler(struct iommu_domain *domain, | ||
83 | iommu_fault_handler_t handler); | ||
84 | |||
85 | /** | ||
86 | * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework | ||
87 | * @domain: the iommu domain where the fault has happened | ||
88 | * @dev: the device where the fault has happened | ||
89 | * @iova: the faulting address | ||
90 | * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) | ||
91 | * | ||
92 | * This function should be called by the low-level IOMMU implementations | ||
93 | * whenever IOMMU faults happen, to allow high-level users, that are | ||
94 | * interested in such events, to know about them. | ||
95 | * | ||
96 | * This event may be useful for several possible use cases: | ||
97 | * - mere logging of the event | ||
98 | * - dynamic TLB/PTE loading | ||
99 | * - restarting of the faulting device, if required | ||
100 | * | ||
101 | * Returns 0 on success and an appropriate error code otherwise (if dynamic | ||
102 | * PTE/TLB loading will one day be supported, implementations will be able | ||
103 | * to tell whether it succeeded or not according to this return value). | ||
104 | * | ||
105 | * Specifically, -ENOSYS is returned if a fault handler isn't installed | ||
106 | * (though fault handlers can also return -ENOSYS, in case they want to | ||
107 | * elicit the default behavior of the IOMMU drivers). | ||
108 | */ | ||
109 | static inline int report_iommu_fault(struct iommu_domain *domain, | ||
110 | struct device *dev, unsigned long iova, int flags) | ||
111 | { | ||
112 | int ret = -ENOSYS; | ||
70 | 113 | ||
71 | #else /* CONFIG_IOMMU_API */ | 114 | /* |
115 | * if upper layers showed interest and installed a fault handler, | ||
116 | * invoke it. | ||
117 | */ | ||
118 | if (domain->handler) | ||
119 | ret = domain->handler(domain, dev, iova, flags); | ||
72 | 120 | ||
73 | static inline void register_iommu(struct iommu_ops *ops) | 121 | return ret; |
74 | { | ||
75 | } | 122 | } |
76 | 123 | ||
77 | static inline bool iommu_found(void) | 124 | #else /* CONFIG_IOMMU_API */ |
125 | |||
126 | struct iommu_ops {}; | ||
127 | |||
128 | static inline bool iommu_present(struct bus_type *bus) | ||
78 | { | 129 | { |
79 | return false; | 130 | return false; |
80 | } | 131 | } |
81 | 132 | ||
82 | static inline struct iommu_domain *iommu_domain_alloc(void) | 133 | static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) |
83 | { | 134 | { |
84 | return NULL; | 135 | return NULL; |
85 | } | 136 | } |
@@ -123,6 +174,11 @@ static inline int domain_has_cap(struct iommu_domain *domain, | |||
123 | return 0; | 174 | return 0; |
124 | } | 175 | } |
125 | 176 | ||
177 | static inline void iommu_set_fault_handler(struct iommu_domain *domain, | ||
178 | iommu_fault_handler_t handler) | ||
179 | { | ||
180 | } | ||
181 | |||
126 | #endif /* CONFIG_IOMMU_API */ | 182 | #endif /* CONFIG_IOMMU_API */ |
127 | 183 | ||
128 | #endif /* __LINUX_IOMMU_H */ | 184 | #endif /* __LINUX_IOMMU_H */ |
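
A client of the new fault reporting hooks in as follows (a sketch; the handler body is illustrative, and returning -ENOSYS preserves the IOMMU driver's default behavior as the kernel-doc above describes):

	static int my_fault_handler(struct iommu_domain *domain,
				    struct device *dev, unsigned long iova,
				    int flags)
	{
		dev_err(dev, "iommu fault at 0x%lx (%s)\n", iova,
			flags & IOMMU_FAULT_WRITE ? "write" : "read");
		return -ENOSYS;	/* fall back to the driver's default handling */
	}

	/* after iommu_domain_alloc(): */
	iommu_set_fault_handler(domain, my_fault_handler);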
diff --git a/lib/dma-debug.c b/lib/dma-debug.c index db07bfd9298e..79700fa2dfc4 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c | |||
@@ -62,6 +62,8 @@ struct dma_debug_entry { | |||
62 | #endif | 62 | #endif |
63 | }; | 63 | }; |
64 | 64 | ||
65 | typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *); | ||
66 | |||
65 | struct hash_bucket { | 67 | struct hash_bucket { |
66 | struct list_head list; | 68 | struct list_head list; |
67 | spinlock_t lock; | 69 | spinlock_t lock; |
@@ -240,18 +242,37 @@ static void put_hash_bucket(struct hash_bucket *bucket, | |||
240 | spin_unlock_irqrestore(&bucket->lock, __flags); | 242 | spin_unlock_irqrestore(&bucket->lock, __flags); |
241 | } | 243 | } |
242 | 244 | ||
245 | static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b) | ||
246 | { | ||
247 | return ((a->dev_addr == b->dev_addr) && | ||
248 | (a->dev == b->dev)) ? true : false; | ||
249 | } | ||
250 | |||
251 | static bool containing_match(struct dma_debug_entry *a, | ||
252 | struct dma_debug_entry *b) | ||
253 | { | ||
254 | if (a->dev != b->dev) | ||
255 | return false; | ||
256 | |||
257 | if ((b->dev_addr <= a->dev_addr) && | ||
258 | ((b->dev_addr + b->size) >= (a->dev_addr + a->size))) | ||
259 | return true; | ||
260 | |||
261 | return false; | ||
262 | } | ||
263 | |||
243 | /* | 264 | /* |
244 | * Search a given entry in the hash bucket list | 265 | * Search a given entry in the hash bucket list |
245 | */ | 266 | */ |
246 | static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket, | 267 | static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket, |
247 | struct dma_debug_entry *ref) | 268 | struct dma_debug_entry *ref, |
269 | match_fn match) | ||
248 | { | 270 | { |
249 | struct dma_debug_entry *entry, *ret = NULL; | 271 | struct dma_debug_entry *entry, *ret = NULL; |
250 | int matches = 0, match_lvl, last_lvl = 0; | 272 | int matches = 0, match_lvl, last_lvl = 0; |
251 | 273 | ||
252 | list_for_each_entry(entry, &bucket->list, list) { | 274 | list_for_each_entry(entry, &bucket->list, list) { |
253 | if ((entry->dev_addr != ref->dev_addr) || | 275 | if (!match(ref, entry)) |
254 | (entry->dev != ref->dev)) | ||
255 | continue; | 276 | continue; |
256 | 277 | ||
257 | /* | 278 | /* |
@@ -293,6 +314,39 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket, | |||
293 | return ret; | 314 | return ret; |
294 | } | 315 | } |
295 | 316 | ||
317 | static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket, | ||
318 | struct dma_debug_entry *ref) | ||
319 | { | ||
320 | return __hash_bucket_find(bucket, ref, exact_match); | ||
321 | } | ||
322 | |||
323 | static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket, | ||
324 | struct dma_debug_entry *ref, | ||
325 | unsigned long *flags) | ||
326 | { | ||
327 | |||
328 | unsigned int max_range = dma_get_max_seg_size(ref->dev); | ||
329 | struct dma_debug_entry *entry, index = *ref; | ||
330 | unsigned int range = 0; | ||
331 | |||
332 | while (range <= max_range) { | ||
333 | entry = __hash_bucket_find(*bucket, &index, containing_match); | ||
334 | |||
335 | if (entry) | ||
336 | return entry; | ||
337 | |||
338 | /* | ||
339 | * Nothing found, go back a hash bucket | ||
340 | */ | ||
341 | put_hash_bucket(*bucket, flags); | ||
342 | range += (1 << HASH_FN_SHIFT); | ||
343 | index.dev_addr -= (1 << HASH_FN_SHIFT); | ||
344 | *bucket = get_hash_bucket(&index, flags); | ||
345 | } | ||
346 | |||
347 | return NULL; | ||
348 | } | ||
349 | |||
296 | /* | 350 | /* |
297 | * Add an entry to a hash bucket | 351 | * Add an entry to a hash bucket |
298 | */ | 352 | */ |
@@ -802,7 +856,7 @@ static void check_unmap(struct dma_debug_entry *ref) | |||
802 | } | 856 | } |
803 | 857 | ||
804 | bucket = get_hash_bucket(ref, &flags); | 858 | bucket = get_hash_bucket(ref, &flags); |
805 | entry = hash_bucket_find(bucket, ref); | 859 | entry = bucket_find_exact(bucket, ref); |
806 | 860 | ||
807 | if (!entry) { | 861 | if (!entry) { |
808 | err_printk(ref->dev, NULL, "DMA-API: device driver tries " | 862 | err_printk(ref->dev, NULL, "DMA-API: device driver tries " |
@@ -902,7 +956,7 @@ static void check_sync(struct device *dev, | |||
902 | 956 | ||
903 | bucket = get_hash_bucket(ref, &flags); | 957 | bucket = get_hash_bucket(ref, &flags); |
904 | 958 | ||
905 | entry = hash_bucket_find(bucket, ref); | 959 | entry = bucket_find_contain(&bucket, ref, &flags); |
906 | 960 | ||
907 | if (!entry) { | 961 | if (!entry) { |
908 | err_printk(dev, NULL, "DMA-API: device driver tries " | 962 | err_printk(dev, NULL, "DMA-API: device driver tries " |
@@ -1060,7 +1114,7 @@ static int get_nr_mapped_entries(struct device *dev, | |||
1060 | int mapped_ents; | 1114 | int mapped_ents; |
1061 | 1115 | ||
1062 | bucket = get_hash_bucket(ref, &flags); | 1116 | bucket = get_hash_bucket(ref, &flags); |
1063 | entry = hash_bucket_find(bucket, ref); | 1117 | entry = bucket_find_exact(bucket, ref); |
1064 | mapped_ents = 0; | 1118 | mapped_ents = 0; |
1065 | 1119 | ||
1066 | if (entry) | 1120 | if (entry) |
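
The practical effect of bucket_find_contain() shows up with partial syncs: syncing a sub-range of a larger streaming mapping used to trip a false DMA-API warning under exact matching, but is now recognized as contained in the original entry (a sketch; dev/buf setup elided):

	dma_addr_t handle = dma_map_single(dev, buf, 4 * PAGE_SIZE,
					   DMA_FROM_DEVICE);

	/* the CPU only needs the first page back; contained in the mapping */
	dma_sync_single_for_cpu(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);

	dma_unmap_single(dev, handle, 4 * PAGE_SIZE, DMA_FROM_DEVICE);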
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c index 967aba133a62..d5f3b8d1e095 100644 --- a/virt/kvm/iommu.c +++ b/virt/kvm/iommu.c | |||
@@ -232,12 +232,12 @@ int kvm_iommu_map_guest(struct kvm *kvm) | |||
232 | { | 232 | { |
233 | int r; | 233 | int r; |
234 | 234 | ||
235 | if (!iommu_found()) { | 235 | if (!iommu_present(&pci_bus_type)) { |
236 | printk(KERN_ERR "%s: iommu not found\n", __func__); | 236 | printk(KERN_ERR "%s: iommu not found\n", __func__); |
237 | return -ENODEV; | 237 | return -ENODEV; |
238 | } | 238 | } |
239 | 239 | ||
240 | kvm->arch.iommu_domain = iommu_domain_alloc(); | 240 | kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type); |
241 | if (!kvm->arch.iommu_domain) | 241 | if (!kvm->arch.iommu_domain) |
242 | return -ENOMEM; | 242 | return -ENOMEM; |
243 | 243 | ||