commit	0cfdc724390fb9370f27bb9a133eadf69114dd21 (patch)
tree	2abfb0112c46c837c6b42007eadfc389293b7710 /arch/arm
parent	b48aeab65e9fc4b0c9757c5fbc1d722544eb8786 (diff)
parent	1abb4ba596a91a839f82e0c9c837b777d574e83d (diff)
author	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-30 18:46:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-30 18:46:19 -0400
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (33 commits)
iommu/core: Remove global iommu_ops and register_iommu
iommu/msm: Use bus_set_iommu instead of register_iommu
iommu/omap: Use bus_set_iommu instead of register_iommu
iommu/vt-d: Use bus_set_iommu instead of register_iommu
iommu/amd: Use bus_set_iommu instead of register_iommu
iommu/core: Use bus->iommu_ops in the iommu-api
iommu/core: Convert iommu_found to iommu_present
iommu/core: Add bus_type parameter to iommu_domain_alloc
Driver core: Add iommu_ops to bus_type
iommu/core: Define iommu_ops and register_iommu only with CONFIG_IOMMU_API
iommu/amd: Fix wrong shift direction
iommu/omap: always provide iommu debug code
iommu/core: let drivers know if an iommu fault handler isn't installed
iommu/core: export iommu_set_fault_handler()
iommu/omap: Fix build error with !IOMMU_SUPPORT
iommu/omap: Migrate to the generic fault report mechanism
iommu/core: Add fault reporting mechanism
iommu/core: Use PAGE_SIZE instead of hard-coded value
iommu/core: use the existing IS_ALIGNED macro
iommu/msm: ->unmap() should return order of unmapped page
...
Fixup trivial conflicts in drivers/iommu/Makefile: "move omap iommu to
dedicated iommu folder" vs "Rename the DMAR and INTR_REMAP config
options" just happened to touch lines next to each other.
Diffstat (limited to 'arch/arm')
 arch/arm/mach-omap2/iommu2.c                                       |   31
 arch/arm/plat-omap/Kconfig                                         |   12
 arch/arm/plat-omap/Makefile                                        |    2
 arch/arm/plat-omap/include/plat/iommu.h                            |   78
 arch/arm/plat-omap/include/plat/iommu2.h                           |    4
 arch/arm/plat-omap/include/plat/iopgtable.h (renamed from arch/arm/plat-omap/iopgtable.h) |   20
 arch/arm/plat-omap/include/plat/iovmm.h                            |   30
 arch/arm/plat-omap/iommu-debug.c                                   |  418
 arch/arm/plat-omap/iommu.c                                         | 1102
 arch/arm/plat-omap/iovmm.c                                         |  904
10 files changed, 88 insertions(+), 2513 deletions(-)
diff --git a/arch/arm/mach-omap2/iommu2.c b/arch/arm/mach-omap2/iommu2.c
index f286012783c6..eefc37912ef3 100644
--- a/arch/arm/mach-omap2/iommu2.c
+++ b/arch/arm/mach-omap2/iommu2.c
@@ -66,7 +66,7 @@
 	 ((pgsz) == MMU_CAM_PGSZ_4K)  ? 0xfffff000 : 0)
 
 
-static void __iommu_set_twl(struct iommu *obj, bool on)
+static void __iommu_set_twl(struct omap_iommu *obj, bool on)
 {
 	u32 l = iommu_read_reg(obj, MMU_CNTL);
 
@@ -85,7 +85,7 @@ static void __iommu_set_twl(struct iommu *obj, bool on)
 }
 
 
-static int omap2_iommu_enable(struct iommu *obj)
+static int omap2_iommu_enable(struct omap_iommu *obj)
 {
 	u32 l, pa;
 	unsigned long timeout;
@@ -127,7 +127,7 @@ static int omap2_iommu_enable(struct iommu *obj)
 	return 0;
 }
 
-static void omap2_iommu_disable(struct iommu *obj)
+static void omap2_iommu_disable(struct omap_iommu *obj)
 {
 	u32 l = iommu_read_reg(obj, MMU_CNTL);
 
@@ -138,12 +138,12 @@ static void omap2_iommu_disable(struct iommu *obj)
 	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
 }
 
-static void omap2_iommu_set_twl(struct iommu *obj, bool on)
+static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on)
 {
 	__iommu_set_twl(obj, false);
 }
 
-static u32 omap2_iommu_fault_isr(struct iommu *obj, u32 *ra)
+static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra)
 {
 	u32 stat, da;
 	u32 errs = 0;
@@ -173,13 +173,13 @@ static u32 omap2_iommu_fault_isr(struct iommu *obj, u32 *ra)
 	return errs;
 }
 
-static void omap2_tlb_read_cr(struct iommu *obj, struct cr_regs *cr)
+static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
 {
 	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
 	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
 }
 
-static void omap2_tlb_load_cr(struct iommu *obj, struct cr_regs *cr)
+static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
 {
 	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
 	iommu_write_reg(obj, cr->ram, MMU_RAM);
@@ -193,7 +193,8 @@ static u32 omap2_cr_to_virt(struct cr_regs *cr)
 	return cr->cam & mask;
 }
 
-static struct cr_regs *omap2_alloc_cr(struct iommu *obj, struct iotlb_entry *e)
+static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj,
+				      struct iotlb_entry *e)
 {
 	struct cr_regs *cr;
 
@@ -230,7 +231,8 @@ static u32 omap2_get_pte_attr(struct iotlb_entry *e)
 	return attr;
 }
 
-static ssize_t omap2_dump_cr(struct iommu *obj, struct cr_regs *cr, char *buf)
+static ssize_t
+omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf)
 {
 	char *p = buf;
 
@@ -254,7 +256,8 @@ static ssize_t omap2_dump_cr(struct iommu *obj, struct cr_regs *cr, char *buf)
 		goto out;					\
 	} while (0)
 
-static ssize_t omap2_iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t len)
+static ssize_t
+omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
 {
 	char *p = buf;
 
@@ -280,7 +283,7 @@ out:
 	return p - buf;
 }
 
-static void omap2_iommu_save_ctx(struct iommu *obj)
+static void omap2_iommu_save_ctx(struct omap_iommu *obj)
 {
 	int i;
 	u32 *p = obj->ctx;
@@ -293,7 +296,7 @@ static void omap2_iommu_save_ctx(struct iommu *obj)
 	BUG_ON(p[0] != IOMMU_ARCH_VERSION);
 }
 
-static void omap2_iommu_restore_ctx(struct iommu *obj)
+static void omap2_iommu_restore_ctx(struct omap_iommu *obj)
 {
 	int i;
 	u32 *p = obj->ctx;
@@ -343,13 +346,13 @@ static const struct iommu_functions omap2_iommu_ops = {
 
 static int __init omap2_iommu_init(void)
 {
-	return install_iommu_arch(&omap2_iommu_ops);
+	return omap_install_iommu_arch(&omap2_iommu_ops);
 }
 module_init(omap2_iommu_init);
 
 static void __exit omap2_iommu_exit(void)
 {
-	uninstall_iommu_arch(&omap2_iommu_ops);
+	omap_uninstall_iommu_arch(&omap2_iommu_ops);
 }
 module_exit(omap2_iommu_exit);
 
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index 6f4edd3408c2..aa59f4247dc5 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -134,18 +134,6 @@ config OMAP_MBOX_KFIFO_SIZE
 	  This can also be changed at runtime (via the mbox_kfifo_size
 	  module parameter).
 
-config OMAP_IOMMU
-	tristate
-
-config OMAP_IOMMU_DEBUG
-	tristate "Export OMAP IOMMU internals in DebugFS"
-	depends on OMAP_IOMMU && DEBUG_FS
-	help
-	  Select this to see extensive information about
-	  the internal state of OMAP IOMMU in debugfs.
-
-	  Say N unless you know you need this.
-
 config OMAP_IOMMU_IVA2
 	bool
 
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index f0233e6abcdf..985262242f25 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -18,8 +18,6 @@ obj-$(CONFIG_ARCH_OMAP3) += omap_device.o
 obj-$(CONFIG_ARCH_OMAP4) += omap_device.o
 
 obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
-obj-$(CONFIG_OMAP_IOMMU) += iommu.o iovmm.o
-obj-$(CONFIG_OMAP_IOMMU_DEBUG) += iommu-debug.o
 
 obj-$(CONFIG_CPU_FREQ) += cpu-omap.o
 obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o
diff --git a/arch/arm/plat-omap/include/plat/iommu.h b/arch/arm/plat-omap/include/plat/iommu.h
index 174f1b9c8c03..a1d79ee19250 100644
--- a/arch/arm/plat-omap/include/plat/iommu.h
+++ b/arch/arm/plat-omap/include/plat/iommu.h
@@ -25,16 +25,17 @@ struct iotlb_entry {
 	};
 };
 
-struct iommu {
+struct omap_iommu {
 	const char	*name;
 	struct module	*owner;
 	struct clk	*clk;
 	void __iomem	*regbase;
 	struct device	*dev;
 	void *isr_priv;
+	struct iommu_domain *domain;
 
 	unsigned int	refcount;
-	struct mutex	iommu_lock;	/* global for this whole object */
+	spinlock_t	iommu_lock;	/* global for this whole object */
 
 	/*
 	 * We don't change iopgd for a situation like pgd for a task,
@@ -48,8 +49,6 @@ struct iommu {
 	struct list_head	mmap;
 	struct mutex		mmap_lock; /* protect mmap */
 
-	int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs, void *priv);
-
 	void *ctx; /* iommu context: registres saved area */
 	u32 da_start;
 	u32 da_end;
@@ -81,25 +80,27 @@ struct iotlb_lock {
 struct iommu_functions {
 	unsigned long	version;
 
-	int (*enable)(struct iommu *obj);
-	void (*disable)(struct iommu *obj);
-	void (*set_twl)(struct iommu *obj, bool on);
-	u32 (*fault_isr)(struct iommu *obj, u32 *ra);
+	int (*enable)(struct omap_iommu *obj);
+	void (*disable)(struct omap_iommu *obj);
+	void (*set_twl)(struct omap_iommu *obj, bool on);
+	u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra);
 
-	void (*tlb_read_cr)(struct iommu *obj, struct cr_regs *cr);
-	void (*tlb_load_cr)(struct iommu *obj, struct cr_regs *cr);
+	void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr);
+	void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr);
 
-	struct cr_regs *(*alloc_cr)(struct iommu *obj, struct iotlb_entry *e);
+	struct cr_regs *(*alloc_cr)(struct omap_iommu *obj,
+				    struct iotlb_entry *e);
 	int (*cr_valid)(struct cr_regs *cr);
 	u32 (*cr_to_virt)(struct cr_regs *cr);
 	void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e);
-	ssize_t (*dump_cr)(struct iommu *obj, struct cr_regs *cr, char *buf);
+	ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr,
+			   char *buf);
 
 	u32 (*get_pte_attr)(struct iotlb_entry *e);
 
-	void (*save_ctx)(struct iommu *obj);
-	void (*restore_ctx)(struct iommu *obj);
-	ssize_t (*dump_ctx)(struct iommu *obj, char *buf, ssize_t len);
+	void (*save_ctx)(struct omap_iommu *obj);
+	void (*restore_ctx)(struct omap_iommu *obj);
+	ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len);
 };
 
 struct iommu_platform_data {
@@ -150,40 +151,31 @@ struct iommu_platform_data {
 /*
  * global functions
  */
-extern u32 iommu_arch_version(void);
+extern u32 omap_iommu_arch_version(void);
 
-extern void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e);
-extern u32 iotlb_cr_to_virt(struct cr_regs *cr);
-
-extern int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e);
-extern void iommu_set_twl(struct iommu *obj, bool on);
-extern void flush_iotlb_page(struct iommu *obj, u32 da);
-extern void flush_iotlb_range(struct iommu *obj, u32 start, u32 end);
-extern void flush_iotlb_all(struct iommu *obj);
-
-extern int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e);
-extern void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd,
-				   u32 **ppte);
-extern size_t iopgtable_clear_entry(struct iommu *obj, u32 iova);
-
-extern int iommu_set_da_range(struct iommu *obj, u32 start, u32 end);
-extern struct iommu *iommu_get(const char *name);
-extern void iommu_put(struct iommu *obj);
-extern int iommu_set_isr(const char *name,
-		int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
+extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e);
+
+extern int
+omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e);
+
+extern int omap_iommu_set_isr(const char *name,
+		int (*isr)(struct omap_iommu *obj, u32 da, u32 iommu_errs,
 			   void *priv),
 			 void *isr_priv);
 
-extern void iommu_save_ctx(struct iommu *obj);
-extern void iommu_restore_ctx(struct iommu *obj);
+extern void omap_iommu_save_ctx(struct omap_iommu *obj);
+extern void omap_iommu_restore_ctx(struct omap_iommu *obj);
 
-extern int install_iommu_arch(const struct iommu_functions *ops);
-extern void uninstall_iommu_arch(const struct iommu_functions *ops);
+extern int omap_install_iommu_arch(const struct iommu_functions *ops);
+extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops);
 
-extern int foreach_iommu_device(void *data,
+extern int omap_foreach_iommu_device(void *data,
 				int (*fn)(struct device *, void *));
 
-extern ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t len);
-extern size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t len);
+extern ssize_t
+omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
+extern size_t
+omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
+struct device *omap_find_iommu_device(const char *name);
 
 #endif /* __MACH_IOMMU_H */
diff --git a/arch/arm/plat-omap/include/plat/iommu2.h b/arch/arm/plat-omap/include/plat/iommu2.h
index 10ad05f410e9..d4116b595e40 100644
--- a/arch/arm/plat-omap/include/plat/iommu2.h
+++ b/arch/arm/plat-omap/include/plat/iommu2.h
@@ -83,12 +83,12 @@
 /*
  * register accessors
  */
-static inline u32 iommu_read_reg(struct iommu *obj, size_t offs)
+static inline u32 iommu_read_reg(struct omap_iommu *obj, size_t offs)
 {
 	return __raw_readl(obj->regbase + offs);
 }
 
-static inline void iommu_write_reg(struct iommu *obj, u32 val, size_t offs)
+static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
 {
 	__raw_writel(val, obj->regbase + offs);
 }
diff --git a/arch/arm/plat-omap/iopgtable.h b/arch/arm/plat-omap/include/plat/iopgtable.h
index c3e93bb0911f..66a813977d52 100644
--- a/arch/arm/plat-omap/iopgtable.h
+++ b/arch/arm/plat-omap/include/plat/iopgtable.h
@@ -56,6 +56,19 @@
 
 #define IOPAGE_MASK		IOPTE_MASK
 
+/**
+ * omap_iommu_translate() - va to pa translation
+ * @d:		omap iommu descriptor
+ * @va:		virtual address
+ * @mask:	omap iommu descriptor mask
+ *
+ * va to pa translation
+ */
+static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
+{
+	return (d & mask) | (va & (~mask));
+}
+
 /*
  * some descriptor attributes.
  */
@@ -64,10 +77,15 @@
 #define IOPGD_SUPER		(1 << 18 | 2 << 0)
 
 #define iopgd_is_table(x)	(((x) & 3) == IOPGD_TABLE)
+#define iopgd_is_section(x)	(((x) & (1 << 18 | 3)) == IOPGD_SECTION)
+#define iopgd_is_super(x)	(((x) & (1 << 18 | 3)) == IOPGD_SUPER)
 
 #define IOPTE_SMALL		(2 << 0)
 #define IOPTE_LARGE		(1 << 0)
 
+#define iopte_is_small(x)	(((x) & 2) == IOPTE_SMALL)
+#define iopte_is_large(x)	(((x) & 3) == IOPTE_LARGE)
+
 /* to find an entry in a page-table-directory */
 #define iopgd_index(da)		(((da) >> IOPGD_SHIFT) & (PTRS_PER_IOPGD - 1))
 #define iopgd_offset(obj, da)	((obj)->iopgd + iopgd_index(da))
@@ -97,6 +115,6 @@ static inline u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa,
 }
 
 #define to_iommu(dev)							\
-	(struct iommu *)platform_get_drvdata(to_platform_device(dev))
+	(struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))
 
 #endif /* __PLAT_OMAP_IOMMU_H */
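The new omap_iommu_translate() helper above composes a physical address by taking the high bits from the descriptor and the low bits from the virtual address. A quick worked example with made-up values (a 1 MiB section, so the mask is IOSECTION_MASK, i.e. 0xfff00000; all numbers below are hypothetical, not from the patch):

u32 d    = 0x8a400000 | IOPGD_SECTION;	/* descriptor: pa base | type bits */
u32 va   = 0x00012345;			/* device virtual address */
u32 mask = 0xfff00000;			/* IOSECTION_MASK for 1 MiB sections */
phys_addr_t pa = (d & mask) | (va & ~mask);	/* == 0x8a412345 */

The type bits in the low part of the descriptor (here bit 18 and bits 1:0) fall outside the mask, so they are stripped automatically by the (d & mask) term.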
diff --git a/arch/arm/plat-omap/include/plat/iovmm.h b/arch/arm/plat-omap/include/plat/iovmm.h
index e992b9655fbc..6af1a91c0f36 100644
--- a/arch/arm/plat-omap/include/plat/iovmm.h
+++ b/arch/arm/plat-omap/include/plat/iovmm.h
@@ -13,8 +13,10 @@
 #ifndef __IOMMU_MMAP_H
 #define __IOMMU_MMAP_H
 
+#include <linux/iommu.h>
+
 struct iovm_struct {
-	struct iommu		*iommu;	/* iommu object which this belongs to */
+	struct omap_iommu	*iommu;	/* iommu object which this belongs to */
 	u32			da_start; /* area definition */
 	u32			da_end;
 	u32			flags; /* IOVMF_: see below */
@@ -70,20 +72,18 @@ struct iovm_struct {
 #define IOVMF_DA_FIXED		(1 << (4 + IOVMF_SW_SHIFT))
 
 
-extern struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da);
-extern u32 iommu_vmap(struct iommu *obj, u32 da,
+extern struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da);
+extern u32
+omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
 			const struct sg_table *sgt, u32 flags);
-extern struct sg_table *iommu_vunmap(struct iommu *obj, u32 da);
-extern u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes,
-				u32 flags);
-extern void iommu_vfree(struct iommu *obj, const u32 da);
-extern u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
-				u32 flags);
-extern void iommu_kunmap(struct iommu *obj, u32 da);
-extern u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes,
-				u32 flags);
-extern void iommu_kfree(struct iommu *obj, u32 da);
-
-extern void *da_to_va(struct iommu *obj, u32 da);
+extern struct sg_table *omap_iommu_vunmap(struct iommu_domain *domain,
+				struct omap_iommu *obj, u32 da);
+extern u32
+omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj,
+				u32 da, size_t bytes, u32 flags);
+extern void
+omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
+				const u32 da);
+extern void *omap_da_to_va(struct omap_iommu *obj, u32 da);
 
 #endif /* __IOMMU_MMAP_H */
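Per the prototypes above, the iovmm entry points now thread the generic iommu_domain through to the iommu-api instead of operating on the omap-private object alone. A hedged sketch of how a caller changes (hypothetical values, error handling omitted; sgt is a previously built struct sg_table):

/* before: da = iommu_vmap(obj, da, sgt, flags); */

/* after: the caller owns a generic domain and passes both */
struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
u32 da = omap_iommu_vmap(domain, obj, 0x80000000, sgt, IOVMF_DA_FIXED);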
diff --git a/arch/arm/plat-omap/iommu-debug.c b/arch/arm/plat-omap/iommu-debug.c
deleted file mode 100644
index f07cf2f08e09..000000000000
--- a/arch/arm/plat-omap/iommu-debug.c
+++ /dev/null
@@ -1,418 +0,0 @@
-/*
- * omap iommu: debugfs interface
- *
- * Copyright (C) 2008-2009 Nokia Corporation
- *
- * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/platform_device.h>
-#include <linux/debugfs.h>
-
-#include <plat/iommu.h>
-#include <plat/iovmm.h>
-
-#include "iopgtable.h"
-
-#define MAXCOLUMN 100 /* for short messages */
-
-static DEFINE_MUTEX(iommu_debug_lock);
-
-static struct dentry *iommu_debug_root;
-
-static ssize_t debug_read_ver(struct file *file, char __user *userbuf,
-			      size_t count, loff_t *ppos)
-{
-	u32 ver = iommu_arch_version();
-	char buf[MAXCOLUMN], *p = buf;
-
-	p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf);
-
-	return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
-}
-
-static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
-			       size_t count, loff_t *ppos)
-{
-	struct iommu *obj = file->private_data;
-	char *p, *buf;
-	ssize_t bytes;
-
-	buf = kmalloc(count, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	p = buf;
-
-	mutex_lock(&iommu_debug_lock);
-
-	bytes = iommu_dump_ctx(obj, p, count);
-	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);
-
-	mutex_unlock(&iommu_debug_lock);
-	kfree(buf);
-
-	return bytes;
-}
-
-static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
-			      size_t count, loff_t *ppos)
-{
-	struct iommu *obj = file->private_data;
-	char *p, *buf;
-	ssize_t bytes, rest;
-
-	buf = kmalloc(count, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	p = buf;
-
-	mutex_lock(&iommu_debug_lock);
-
-	p += sprintf(p, "%8s %8s\n", "cam:", "ram:");
-	p += sprintf(p, "-----------------------------------------\n");
-	rest = count - (p - buf);
-	p += dump_tlb_entries(obj, p, rest);
-
-	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
-
-	mutex_unlock(&iommu_debug_lock);
-	kfree(buf);
-
-	return bytes;
-}
-
-static ssize_t debug_write_pagetable(struct file *file,
-		     const char __user *userbuf, size_t count, loff_t *ppos)
-{
-	struct iotlb_entry e;
-	struct cr_regs cr;
-	int err;
-	struct iommu *obj = file->private_data;
-	char buf[MAXCOLUMN], *p = buf;
-
-	count = min(count, sizeof(buf));
-
-	mutex_lock(&iommu_debug_lock);
-	if (copy_from_user(p, userbuf, count)) {
-		mutex_unlock(&iommu_debug_lock);
-		return -EFAULT;
-	}
-
-	sscanf(p, "%x %x", &cr.cam, &cr.ram);
-	if (!cr.cam || !cr.ram) {
-		mutex_unlock(&iommu_debug_lock);
-		return -EINVAL;
-	}
-
-	iotlb_cr_to_e(&cr, &e);
-	err = iopgtable_store_entry(obj, &e);
-	if (err)
-		dev_err(obj->dev, "%s: fail to store cr\n", __func__);
-
-	mutex_unlock(&iommu_debug_lock);
-	return count;
-}
-
-#define dump_ioptable_entry_one(lv, da, val)			\
-	({							\
-		int __err = 0;					\
-		ssize_t bytes;					\
-		const int maxcol = 22;				\
-		const char *str = "%d: %08x %08x\n";		\
-		bytes = snprintf(p, maxcol, str, lv, da, val);	\
-		p += bytes;					\
-		len -= bytes;					\
-		if (len < maxcol)				\
-			__err = -ENOMEM;			\
-		__err;						\
-	})
-
-static ssize_t dump_ioptable(struct iommu *obj, char *buf, ssize_t len)
-{
-	int i;
-	u32 *iopgd;
-	char *p = buf;
-
-	spin_lock(&obj->page_table_lock);
-
-	iopgd = iopgd_offset(obj, 0);
-	for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) {
-		int j, err;
-		u32 *iopte;
-		u32 da;
-
-		if (!*iopgd)
-			continue;
-
-		if (!(*iopgd & IOPGD_TABLE)) {
-			da = i << IOPGD_SHIFT;
-
-			err = dump_ioptable_entry_one(1, da, *iopgd);
-			if (err)
-				goto out;
-			continue;
-		}
-
-		iopte = iopte_offset(iopgd, 0);
-
-		for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) {
-			if (!*iopte)
-				continue;
-
-			da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT);
-			err = dump_ioptable_entry_one(2, da, *iopgd);
-			if (err)
-				goto out;
-		}
-	}
-out:
-	spin_unlock(&obj->page_table_lock);
-
-	return p - buf;
-}
-
-static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
-				    size_t count, loff_t *ppos)
-{
-	struct iommu *obj = file->private_data;
-	char *p, *buf;
-	size_t bytes;
-
-	buf = (char *)__get_free_page(GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	p = buf;
-
-	p += sprintf(p, "L: %8s %8s\n", "da:", "pa:");
-	p += sprintf(p, "-----------------------------------------\n");
-
-	mutex_lock(&iommu_debug_lock);
-
-	bytes = PAGE_SIZE - (p - buf);
-	p += dump_ioptable(obj, p, bytes);
-
-	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
-
-	mutex_unlock(&iommu_debug_lock);
-	free_page((unsigned long)buf);
-
-	return bytes;
-}
-
-static ssize_t debug_read_mmap(struct file *file, char __user *userbuf,
-			       size_t count, loff_t *ppos)
-{
-	struct iommu *obj = file->private_data;
-	char *p, *buf;
-	struct iovm_struct *tmp;
-	int uninitialized_var(i);
-	ssize_t bytes;
-
-	buf = (char *)__get_free_page(GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	p = buf;
-
-	p += sprintf(p, "%-3s %-8s %-8s %6s %8s\n",
-		     "No", "start", "end", "size", "flags");
-	p += sprintf(p, "-------------------------------------------------\n");
-
-	mutex_lock(&iommu_debug_lock);
-
-	list_for_each_entry(tmp, &obj->mmap, list) {
-		size_t len;
-		const char *str = "%3d %08x-%08x %6x %8x\n";
-		const int maxcol = 39;
-
-		len = tmp->da_end - tmp->da_start;
-		p += snprintf(p, maxcol, str,
-			      i, tmp->da_start, tmp->da_end, len, tmp->flags);
-
-		if (PAGE_SIZE - (p - buf) < maxcol)
-			break;
-		i++;
-	}
-
-	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
-
-	mutex_unlock(&iommu_debug_lock);
-	free_page((unsigned long)buf);
-
-	return bytes;
-}
-
-static ssize_t debug_read_mem(struct file *file, char __user *userbuf,
-			      size_t count, loff_t *ppos)
-{
-	struct iommu *obj = file->private_data;
-	char *p, *buf;
-	struct iovm_struct *area;
-	ssize_t bytes;
-
-	count = min_t(ssize_t, count, PAGE_SIZE);
-
-	buf = (char *)__get_free_page(GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	p = buf;
-
-	mutex_lock(&iommu_debug_lock);
-
-	area = find_iovm_area(obj, (u32)ppos);
-	if (IS_ERR(area)) {
-		bytes = -EINVAL;
-		goto err_out;
-	}
-	memcpy(p, area->va, count);
-	p += count;
-
-	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
-err_out:
-	mutex_unlock(&iommu_debug_lock);
-	free_page((unsigned long)buf);
-
-	return bytes;
-}
-
-static ssize_t debug_write_mem(struct file *file, const char __user *userbuf,
-			       size_t count, loff_t *ppos)
-{
-	struct iommu *obj = file->private_data;
-	struct iovm_struct *area;
-	char *p, *buf;
-
-	count = min_t(size_t, count, PAGE_SIZE);
-
-	buf = (char *)__get_free_page(GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	p = buf;
-
-	mutex_lock(&iommu_debug_lock);
-
-	if (copy_from_user(p, userbuf, count)) {
-		count = -EFAULT;
-		goto err_out;
-	}
-
-	area = find_iovm_area(obj, (u32)ppos);
-	if (IS_ERR(area)) {
-		count = -EINVAL;
-		goto err_out;
-	}
-	memcpy(area->va, p, count);
-err_out:
-	mutex_unlock(&iommu_debug_lock);
-	free_page((unsigned long)buf);
-
-	return count;
-}
-
-static int debug_open_generic(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
-#define DEBUG_FOPS(name)						\
-	static const struct file_operations debug_##name##_fops = {	\
-		.open = debug_open_generic,				\
-		.read = debug_read_##name,				\
-		.write = debug_write_##name,				\
-		.llseek = generic_file_llseek,				\
-	};
-
-#define DEBUG_FOPS_RO(name)						\
-	static const struct file_operations debug_##name##_fops = {	\
-		.open = debug_open_generic,				\
-		.read = debug_read_##name,				\
-		.llseek = generic_file_llseek,				\
-	};
-
-DEBUG_FOPS_RO(ver);
-DEBUG_FOPS_RO(regs);
-DEBUG_FOPS_RO(tlb);
-DEBUG_FOPS(pagetable);
-DEBUG_FOPS_RO(mmap);
-DEBUG_FOPS(mem);
-
-#define __DEBUG_ADD_FILE(attr, mode)					\
-	{								\
-		struct dentry *dent;					\
-		dent = debugfs_create_file(#attr, mode, parent,		\
-					   obj, &debug_##attr##_fops);	\
-		if (!dent)						\
-			return -ENOMEM;					\
-	}
-
-#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 600)
-#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 400)
-
-static int iommu_debug_register(struct device *dev, void *data)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct iommu *obj = platform_get_drvdata(pdev);
-	struct dentry *d, *parent;
-
-	if (!obj || !obj->dev)
-		return -EINVAL;
-
-	d = debugfs_create_dir(obj->name, iommu_debug_root);
-	if (!d)
-		return -ENOMEM;
-	parent = d;
-
-	d = debugfs_create_u8("nr_tlb_entries", 400, parent,
-			      (u8 *)&obj->nr_tlb_entries);
-	if (!d)
-		return -ENOMEM;
-
-	DEBUG_ADD_FILE_RO(ver);
-	DEBUG_ADD_FILE_RO(regs);
-	DEBUG_ADD_FILE_RO(tlb);
-	DEBUG_ADD_FILE(pagetable);
-	DEBUG_ADD_FILE_RO(mmap);
-	DEBUG_ADD_FILE(mem);
-
-	return 0;
-}
-
-static int __init iommu_debug_init(void)
-{
-	struct dentry *d;
-	int err;
-
-	d = debugfs_create_dir("iommu", NULL);
-	if (!d)
-		return -ENOMEM;
-	iommu_debug_root = d;
-
-	err = foreach_iommu_device(d, iommu_debug_register);
-	if (err)
-		goto err_out;
-	return 0;
-
-err_out:
-	debugfs_remove_recursive(iommu_debug_root);
-	return err;
-}
-module_init(iommu_debug_init)
-
-static void __exit iommu_debugfs_exit(void)
-{
-	debugfs_remove_recursive(iommu_debug_root);
-}
-module_exit(iommu_debugfs_exit)
-
-MODULE_DESCRIPTION("omap iommu: debugfs interface");
-MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c deleted file mode 100644 index 34fc31ee9081..000000000000 --- a/arch/arm/plat-omap/iommu.c +++ /dev/null | |||
@@ -1,1102 +0,0 @@ | |||
1 | /* | ||
2 | * omap iommu: tlb and pagetable primitives | ||
3 | * | ||
4 | * Copyright (C) 2008-2010 Nokia Corporation | ||
5 | * | ||
6 | * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>, | ||
7 | * Paul Mundt and Toshihiro Kobayashi | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #include <linux/err.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/ioport.h> | ||
19 | #include <linux/clk.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | |||
22 | #include <asm/cacheflush.h> | ||
23 | |||
24 | #include <plat/iommu.h> | ||
25 | |||
26 | #include "iopgtable.h" | ||
27 | |||
28 | #define for_each_iotlb_cr(obj, n, __i, cr) \ | ||
29 | for (__i = 0; \ | ||
30 | (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \ | ||
31 | __i++) | ||
32 | |||
33 | /* accommodate the difference between omap1 and omap2/3 */ | ||
34 | static const struct iommu_functions *arch_iommu; | ||
35 | |||
36 | static struct platform_driver omap_iommu_driver; | ||
37 | static struct kmem_cache *iopte_cachep; | ||
38 | |||
39 | /** | ||
40 | * install_iommu_arch - Install archtecure specific iommu functions | ||
41 | * @ops: a pointer to architecture specific iommu functions | ||
42 | * | ||
43 | * There are several kind of iommu algorithm(tlb, pagetable) among | ||
44 | * omap series. This interface installs such an iommu algorighm. | ||
45 | **/ | ||
46 | int install_iommu_arch(const struct iommu_functions *ops) | ||
47 | { | ||
48 | if (arch_iommu) | ||
49 | return -EBUSY; | ||
50 | |||
51 | arch_iommu = ops; | ||
52 | return 0; | ||
53 | } | ||
54 | EXPORT_SYMBOL_GPL(install_iommu_arch); | ||
55 | |||
56 | /** | ||
57 | * uninstall_iommu_arch - Uninstall archtecure specific iommu functions | ||
58 | * @ops: a pointer to architecture specific iommu functions | ||
59 | * | ||
60 | * This interface uninstalls the iommu algorighm installed previously. | ||
61 | **/ | ||
62 | void uninstall_iommu_arch(const struct iommu_functions *ops) | ||
63 | { | ||
64 | if (arch_iommu != ops) | ||
65 | pr_err("%s: not your arch\n", __func__); | ||
66 | |||
67 | arch_iommu = NULL; | ||
68 | } | ||
69 | EXPORT_SYMBOL_GPL(uninstall_iommu_arch); | ||
70 | |||
71 | /** | ||
72 | * iommu_save_ctx - Save registers for pm off-mode support | ||
73 | * @obj: target iommu | ||
74 | **/ | ||
75 | void iommu_save_ctx(struct iommu *obj) | ||
76 | { | ||
77 | arch_iommu->save_ctx(obj); | ||
78 | } | ||
79 | EXPORT_SYMBOL_GPL(iommu_save_ctx); | ||
80 | |||
81 | /** | ||
82 | * iommu_restore_ctx - Restore registers for pm off-mode support | ||
83 | * @obj: target iommu | ||
84 | **/ | ||
85 | void iommu_restore_ctx(struct iommu *obj) | ||
86 | { | ||
87 | arch_iommu->restore_ctx(obj); | ||
88 | } | ||
89 | EXPORT_SYMBOL_GPL(iommu_restore_ctx); | ||
90 | |||
91 | /** | ||
92 | * iommu_arch_version - Return running iommu arch version | ||
93 | **/ | ||
94 | u32 iommu_arch_version(void) | ||
95 | { | ||
96 | return arch_iommu->version; | ||
97 | } | ||
98 | EXPORT_SYMBOL_GPL(iommu_arch_version); | ||
99 | |||
100 | static int iommu_enable(struct iommu *obj) | ||
101 | { | ||
102 | int err; | ||
103 | |||
104 | if (!obj) | ||
105 | return -EINVAL; | ||
106 | |||
107 | if (!arch_iommu) | ||
108 | return -ENODEV; | ||
109 | |||
110 | clk_enable(obj->clk); | ||
111 | |||
112 | err = arch_iommu->enable(obj); | ||
113 | |||
114 | clk_disable(obj->clk); | ||
115 | return err; | ||
116 | } | ||
117 | |||
118 | static void iommu_disable(struct iommu *obj) | ||
119 | { | ||
120 | if (!obj) | ||
121 | return; | ||
122 | |||
123 | clk_enable(obj->clk); | ||
124 | |||
125 | arch_iommu->disable(obj); | ||
126 | |||
127 | clk_disable(obj->clk); | ||
128 | } | ||
129 | |||
130 | /* | ||
131 | * TLB operations | ||
132 | */ | ||
133 | void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) | ||
134 | { | ||
135 | BUG_ON(!cr || !e); | ||
136 | |||
137 | arch_iommu->cr_to_e(cr, e); | ||
138 | } | ||
139 | EXPORT_SYMBOL_GPL(iotlb_cr_to_e); | ||
140 | |||
141 | static inline int iotlb_cr_valid(struct cr_regs *cr) | ||
142 | { | ||
143 | if (!cr) | ||
144 | return -EINVAL; | ||
145 | |||
146 | return arch_iommu->cr_valid(cr); | ||
147 | } | ||
148 | |||
149 | static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj, | ||
150 | struct iotlb_entry *e) | ||
151 | { | ||
152 | if (!e) | ||
153 | return NULL; | ||
154 | |||
155 | return arch_iommu->alloc_cr(obj, e); | ||
156 | } | ||
157 | |||
158 | u32 iotlb_cr_to_virt(struct cr_regs *cr) | ||
159 | { | ||
160 | return arch_iommu->cr_to_virt(cr); | ||
161 | } | ||
162 | EXPORT_SYMBOL_GPL(iotlb_cr_to_virt); | ||
163 | |||
164 | static u32 get_iopte_attr(struct iotlb_entry *e) | ||
165 | { | ||
166 | return arch_iommu->get_pte_attr(e); | ||
167 | } | ||
168 | |||
169 | static u32 iommu_report_fault(struct iommu *obj, u32 *da) | ||
170 | { | ||
171 | return arch_iommu->fault_isr(obj, da); | ||
172 | } | ||
173 | |||
174 | static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l) | ||
175 | { | ||
176 | u32 val; | ||
177 | |||
178 | val = iommu_read_reg(obj, MMU_LOCK); | ||
179 | |||
180 | l->base = MMU_LOCK_BASE(val); | ||
181 | l->vict = MMU_LOCK_VICT(val); | ||
182 | |||
183 | } | ||
184 | |||
185 | static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l) | ||
186 | { | ||
187 | u32 val; | ||
188 | |||
189 | val = (l->base << MMU_LOCK_BASE_SHIFT); | ||
190 | val |= (l->vict << MMU_LOCK_VICT_SHIFT); | ||
191 | |||
192 | iommu_write_reg(obj, val, MMU_LOCK); | ||
193 | } | ||
194 | |||
195 | static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr) | ||
196 | { | ||
197 | arch_iommu->tlb_read_cr(obj, cr); | ||
198 | } | ||
199 | |||
200 | static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr) | ||
201 | { | ||
202 | arch_iommu->tlb_load_cr(obj, cr); | ||
203 | |||
204 | iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); | ||
205 | iommu_write_reg(obj, 1, MMU_LD_TLB); | ||
206 | } | ||
207 | |||
208 | /** | ||
209 | * iotlb_dump_cr - Dump an iommu tlb entry into buf | ||
210 | * @obj: target iommu | ||
211 | * @cr: contents of cam and ram register | ||
212 | * @buf: output buffer | ||
213 | **/ | ||
214 | static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr, | ||
215 | char *buf) | ||
216 | { | ||
217 | BUG_ON(!cr || !buf); | ||
218 | |||
219 | return arch_iommu->dump_cr(obj, cr, buf); | ||
220 | } | ||
221 | |||
222 | /* only used in iotlb iteration for-loop */ | ||
223 | static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n) | ||
224 | { | ||
225 | struct cr_regs cr; | ||
226 | struct iotlb_lock l; | ||
227 | |||
228 | iotlb_lock_get(obj, &l); | ||
229 | l.vict = n; | ||
230 | iotlb_lock_set(obj, &l); | ||
231 | iotlb_read_cr(obj, &cr); | ||
232 | |||
233 | return cr; | ||
234 | } | ||
235 | |||
236 | /** | ||
237 | * load_iotlb_entry - Set an iommu tlb entry | ||
238 | * @obj: target iommu | ||
239 | * @e: an iommu tlb entry info | ||
240 | **/ | ||
241 | int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e) | ||
242 | { | ||
243 | int err = 0; | ||
244 | struct iotlb_lock l; | ||
245 | struct cr_regs *cr; | ||
246 | |||
247 | if (!obj || !obj->nr_tlb_entries || !e) | ||
248 | return -EINVAL; | ||
249 | |||
250 | clk_enable(obj->clk); | ||
251 | |||
252 | iotlb_lock_get(obj, &l); | ||
253 | if (l.base == obj->nr_tlb_entries) { | ||
254 | dev_warn(obj->dev, "%s: preserve entries full\n", __func__); | ||
255 | err = -EBUSY; | ||
256 | goto out; | ||
257 | } | ||
258 | if (!e->prsvd) { | ||
259 | int i; | ||
260 | struct cr_regs tmp; | ||
261 | |||
262 | for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp) | ||
263 | if (!iotlb_cr_valid(&tmp)) | ||
264 | break; | ||
265 | |||
266 | if (i == obj->nr_tlb_entries) { | ||
267 | dev_dbg(obj->dev, "%s: full: no entry\n", __func__); | ||
268 | err = -EBUSY; | ||
269 | goto out; | ||
270 | } | ||
271 | |||
272 | iotlb_lock_get(obj, &l); | ||
273 | } else { | ||
274 | l.vict = l.base; | ||
275 | iotlb_lock_set(obj, &l); | ||
276 | } | ||
277 | |||
278 | cr = iotlb_alloc_cr(obj, e); | ||
279 | if (IS_ERR(cr)) { | ||
280 | clk_disable(obj->clk); | ||
281 | return PTR_ERR(cr); | ||
282 | } | ||
283 | |||
284 | iotlb_load_cr(obj, cr); | ||
285 | kfree(cr); | ||
286 | |||
287 | if (e->prsvd) | ||
288 | l.base++; | ||
289 | /* increment victim for next tlb load */ | ||
290 | if (++l.vict == obj->nr_tlb_entries) | ||
291 | l.vict = l.base; | ||
292 | iotlb_lock_set(obj, &l); | ||
293 | out: | ||
294 | clk_disable(obj->clk); | ||
295 | return err; | ||
296 | } | ||
297 | EXPORT_SYMBOL_GPL(load_iotlb_entry); | ||
298 | |||
299 | /** | ||
300 | * flush_iotlb_page - Clear an iommu tlb entry | ||
301 | * @obj: target iommu | ||
302 | * @da: iommu device virtual address | ||
303 | * | ||
304 | * Clear an iommu tlb entry which includes 'da' address. | ||
305 | **/ | ||
306 | void flush_iotlb_page(struct iommu *obj, u32 da) | ||
307 | { | ||
308 | int i; | ||
309 | struct cr_regs cr; | ||
310 | |||
311 | clk_enable(obj->clk); | ||
312 | |||
313 | for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) { | ||
314 | u32 start; | ||
315 | size_t bytes; | ||
316 | |||
317 | if (!iotlb_cr_valid(&cr)) | ||
318 | continue; | ||
319 | |||
320 | start = iotlb_cr_to_virt(&cr); | ||
321 | bytes = iopgsz_to_bytes(cr.cam & 3); | ||
322 | |||
323 | if ((start <= da) && (da < start + bytes)) { | ||
324 | dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n", | ||
325 | __func__, start, da, bytes); | ||
326 | iotlb_load_cr(obj, &cr); | ||
327 | iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); | ||
328 | } | ||
329 | } | ||
330 | clk_disable(obj->clk); | ||
331 | |||
332 | if (i == obj->nr_tlb_entries) | ||
333 | dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da); | ||
334 | } | ||
335 | EXPORT_SYMBOL_GPL(flush_iotlb_page); | ||
336 | |||
337 | /** | ||
338 | * flush_iotlb_range - Clear an iommu tlb entries | ||
339 | * @obj: target iommu | ||
340 | * @start: iommu device virtual address(start) | ||
341 | * @end: iommu device virtual address(end) | ||
342 | * | ||
343 | * Clear an iommu tlb entry which includes 'da' address. | ||
344 | **/ | ||
345 | void flush_iotlb_range(struct iommu *obj, u32 start, u32 end) | ||
346 | { | ||
347 | u32 da = start; | ||
348 | |||
349 | while (da < end) { | ||
350 | flush_iotlb_page(obj, da); | ||
351 | /* FIXME: Optimize for multiple page size */ | ||
352 | da += IOPTE_SIZE; | ||
353 | } | ||
354 | } | ||
355 | EXPORT_SYMBOL_GPL(flush_iotlb_range); | ||
356 | |||
357 | /** | ||
358 | * flush_iotlb_all - Clear all iommu tlb entries | ||
359 | * @obj: target iommu | ||
360 | **/ | ||
361 | void flush_iotlb_all(struct iommu *obj) | ||
362 | { | ||
363 | struct iotlb_lock l; | ||
364 | |||
365 | clk_enable(obj->clk); | ||
366 | |||
367 | l.base = 0; | ||
368 | l.vict = 0; | ||
369 | iotlb_lock_set(obj, &l); | ||
370 | |||
371 | iommu_write_reg(obj, 1, MMU_GFLUSH); | ||
372 | |||
373 | clk_disable(obj->clk); | ||
374 | } | ||
375 | EXPORT_SYMBOL_GPL(flush_iotlb_all); | ||
376 | |||
377 | /** | ||
378 | * iommu_set_twl - enable/disable table walking logic | ||
379 | * @obj: target iommu | ||
380 | * @on: enable/disable | ||
381 | * | ||
382 | * Function used to enable/disable TWL. If one wants to work | ||
383 | * exclusively with locked TLB entries and receive notifications | ||
384 | * for TLB miss then call this function to disable TWL. | ||
385 | */ | ||
386 | void iommu_set_twl(struct iommu *obj, bool on) | ||
387 | { | ||
388 | clk_enable(obj->clk); | ||
389 | arch_iommu->set_twl(obj, on); | ||
390 | clk_disable(obj->clk); | ||
391 | } | ||
392 | EXPORT_SYMBOL_GPL(iommu_set_twl); | ||
393 | |||
394 | #if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE) | ||
395 | |||
396 | ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes) | ||
397 | { | ||
398 | if (!obj || !buf) | ||
399 | return -EINVAL; | ||
400 | |||
401 | clk_enable(obj->clk); | ||
402 | |||
403 | bytes = arch_iommu->dump_ctx(obj, buf, bytes); | ||
404 | |||
405 | clk_disable(obj->clk); | ||
406 | |||
407 | return bytes; | ||
408 | } | ||
409 | EXPORT_SYMBOL_GPL(iommu_dump_ctx); | ||
410 | |||
411 | static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num) | ||
412 | { | ||
413 | int i; | ||
414 | struct iotlb_lock saved; | ||
415 | struct cr_regs tmp; | ||
416 | struct cr_regs *p = crs; | ||
417 | |||
418 | clk_enable(obj->clk); | ||
419 | iotlb_lock_get(obj, &saved); | ||
420 | |||
421 | for_each_iotlb_cr(obj, num, i, tmp) { | ||
422 | if (!iotlb_cr_valid(&tmp)) | ||
423 | continue; | ||
424 | *p++ = tmp; | ||
425 | } | ||
426 | |||
427 | iotlb_lock_set(obj, &saved); | ||
428 | clk_disable(obj->clk); | ||
429 | |||
430 | return p - crs; | ||
431 | } | ||
432 | |||
433 | /** | ||
434 | * dump_tlb_entries - dump cr arrays to given buffer | ||
435 | * @obj: target iommu | ||
436 | * @buf: output buffer | ||
437 | **/ | ||
438 | size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes) | ||
439 | { | ||
440 | int i, num; | ||
441 | struct cr_regs *cr; | ||
442 | char *p = buf; | ||
443 | |||
444 | num = bytes / sizeof(*cr); | ||
445 | num = min(obj->nr_tlb_entries, num); | ||
446 | |||
447 | cr = kcalloc(num, sizeof(*cr), GFP_KERNEL); | ||
448 | if (!cr) | ||
449 | return 0; | ||
450 | |||
451 | num = __dump_tlb_entries(obj, cr, num); | ||
452 | for (i = 0; i < num; i++) | ||
453 | p += iotlb_dump_cr(obj, cr + i, p); | ||
454 | kfree(cr); | ||
455 | |||
456 | return p - buf; | ||
457 | } | ||
458 | EXPORT_SYMBOL_GPL(dump_tlb_entries); | ||
459 | |||
460 | int foreach_iommu_device(void *data, int (*fn)(struct device *, void *)) | ||
461 | { | ||
462 | return driver_for_each_device(&omap_iommu_driver.driver, | ||
463 | NULL, data, fn); | ||
464 | } | ||
465 | EXPORT_SYMBOL_GPL(foreach_iommu_device); | ||
466 | |||
467 | #endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */ | ||
468 | |||
469 | /* | ||
470 | * H/W pagetable operations | ||
471 | */ | ||
472 | static void flush_iopgd_range(u32 *first, u32 *last) | ||
473 | { | ||
474 | /* FIXME: L2 cache should be taken care of if it exists */ | ||
475 | do { | ||
476 | asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd" | ||
477 | : : "r" (first)); | ||
478 | first += L1_CACHE_BYTES / sizeof(*first); | ||
479 | } while (first <= last); | ||
480 | } | ||
481 | |||
482 | static void flush_iopte_range(u32 *first, u32 *last) | ||
483 | { | ||
484 | /* FIXME: L2 cache should be taken care of if it exists */ | ||
485 | do { | ||
486 | asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte" | ||
487 | : : "r" (first)); | ||
488 | first += L1_CACHE_BYTES / sizeof(*first); | ||
489 | } while (first <= last); | ||
490 | } | ||
491 | |||
492 | static void iopte_free(u32 *iopte) | ||
493 | { | ||
494 | /* Note: freed iopte's must be clean ready for re-use */ | ||
495 | kmem_cache_free(iopte_cachep, iopte); | ||
496 | } | ||
497 | |||
498 | static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da) | ||
499 | { | ||
500 | u32 *iopte; | ||
501 | |||
502 | /* a table has already existed */ | ||
503 | if (*iopgd) | ||
504 | goto pte_ready; | ||
505 | |||
506 | /* | ||
507 | * do the allocation outside the page table lock | ||
508 | */ | ||
509 | spin_unlock(&obj->page_table_lock); | ||
510 | iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL); | ||
511 | spin_lock(&obj->page_table_lock); | ||
512 | |||
513 | if (!*iopgd) { | ||
514 | if (!iopte) | ||
515 | return ERR_PTR(-ENOMEM); | ||
516 | |||
517 | *iopgd = virt_to_phys(iopte) | IOPGD_TABLE; | ||
518 | flush_iopgd_range(iopgd, iopgd); | ||
519 | |||
520 | dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte); | ||
521 | } else { | ||
522 | /* We raced, free the reduniovant table */ | ||
523 | iopte_free(iopte); | ||
524 | } | ||
525 | |||
526 | pte_ready: | ||
527 | iopte = iopte_offset(iopgd, da); | ||
528 | |||
529 | dev_vdbg(obj->dev, | ||
530 | "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n", | ||
531 | __func__, da, iopgd, *iopgd, iopte, *iopte); | ||
532 | |||
533 | return iopte; | ||
534 | } | ||
535 | |||
536 | static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot) | ||
537 | { | ||
538 | u32 *iopgd = iopgd_offset(obj, da); | ||
539 | |||
540 | if ((da | pa) & ~IOSECTION_MASK) { | ||
541 | dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n", | ||
542 | __func__, da, pa, IOSECTION_SIZE); | ||
543 | return -EINVAL; | ||
544 | } | ||
545 | |||
546 | *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION; | ||
547 | flush_iopgd_range(iopgd, iopgd); | ||
548 | return 0; | ||
549 | } | ||
550 | |||
551 | static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot) | ||
552 | { | ||
553 | u32 *iopgd = iopgd_offset(obj, da); | ||
554 | int i; | ||
555 | |||
556 | if ((da | pa) & ~IOSUPER_MASK) { | ||
557 | dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n", | ||
558 | __func__, da, pa, IOSUPER_SIZE); | ||
559 | return -EINVAL; | ||
560 | } | ||
561 | |||
562 | for (i = 0; i < 16; i++) | ||
563 | *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER; | ||
564 | flush_iopgd_range(iopgd, iopgd + 15); | ||
565 | return 0; | ||
566 | } | ||
567 | |||
568 | static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot) | ||
569 | { | ||
570 | u32 *iopgd = iopgd_offset(obj, da); | ||
571 | u32 *iopte = iopte_alloc(obj, iopgd, da); | ||
572 | |||
573 | if (IS_ERR(iopte)) | ||
574 | return PTR_ERR(iopte); | ||
575 | |||
576 | *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL; | ||
577 | flush_iopte_range(iopte, iopte); | ||
578 | |||
579 | dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n", | ||
580 | __func__, da, pa, iopte, *iopte); | ||
581 | |||
582 | return 0; | ||
583 | } | ||
584 | |||
585 | static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot) | ||
586 | { | ||
587 | u32 *iopgd = iopgd_offset(obj, da); | ||
588 | u32 *iopte = iopte_alloc(obj, iopgd, da); | ||
589 | int i; | ||
590 | |||
591 | if ((da | pa) & ~IOLARGE_MASK) { | ||
592 | dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n", | ||
593 | __func__, da, pa, IOLARGE_SIZE); | ||
594 | return -EINVAL; | ||
595 | } | ||
596 | |||
597 | if (IS_ERR(iopte)) | ||
598 | return PTR_ERR(iopte); | ||
599 | |||
600 | for (i = 0; i < 16; i++) | ||
601 | *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE; | ||
602 | flush_iopte_range(iopte, iopte + 15); | ||
603 | return 0; | ||
604 | } | ||
605 | |||
606 | static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e) | ||
607 | { | ||
608 | int (*fn)(struct iommu *, u32, u32, u32); | ||
609 | u32 prot; | ||
610 | int err; | ||
611 | |||
612 | if (!obj || !e) | ||
613 | return -EINVAL; | ||
614 | |||
615 | switch (e->pgsz) { | ||
616 | case MMU_CAM_PGSZ_16M: | ||
617 | fn = iopgd_alloc_super; | ||
618 | break; | ||
619 | case MMU_CAM_PGSZ_1M: | ||
620 | fn = iopgd_alloc_section; | ||
621 | break; | ||
622 | case MMU_CAM_PGSZ_64K: | ||
623 | fn = iopte_alloc_large; | ||
624 | break; | ||
625 | case MMU_CAM_PGSZ_4K: | ||
626 | fn = iopte_alloc_page; | ||
627 | break; | ||
628 | default: | ||
629 | fn = NULL; | ||
630 | BUG(); | ||
631 | break; | ||
632 | } | ||
633 | |||
634 | prot = get_iopte_attr(e); | ||
635 | |||
636 | spin_lock(&obj->page_table_lock); | ||
637 | err = fn(obj, e->da, e->pa, prot); | ||
638 | spin_unlock(&obj->page_table_lock); | ||
639 | |||
640 | return err; | ||
641 | } | ||
642 | |||
643 | /** | ||
644 | * iopgtable_store_entry - Make an iommu pte entry | ||
645 | * @obj: target iommu | ||
646 | * @e: an iommu tlb entry info | ||
647 | **/ | ||
648 | int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e) | ||
649 | { | ||
650 | int err; | ||
651 | |||
652 | flush_iotlb_page(obj, e->da); | ||
653 | err = iopgtable_store_entry_core(obj, e); | ||
654 | #ifdef PREFETCH_IOTLB | ||
655 | if (!err) | ||
656 | load_iotlb_entry(obj, e); | ||
657 | #endif | ||
658 | return err; | ||
659 | } | ||
660 | EXPORT_SYMBOL_GPL(iopgtable_store_entry); | ||
661 | |||
662 | /** | ||
663 | * iopgtable_lookup_entry - Lookup an iommu pte entry | ||
664 | * @obj: target iommu | ||
665 | * @da: iommu device virtual address | ||
666 | * @ppgd: iommu pgd entry pointer to be returned | ||
667 | * @ppte: iommu pte entry pointer to be returned | ||
668 | **/ | ||
669 | void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte) | ||
670 | { | ||
671 | u32 *iopgd, *iopte = NULL; | ||
672 | |||
673 | iopgd = iopgd_offset(obj, da); | ||
674 | if (!*iopgd) | ||
675 | goto out; | ||
676 | |||
677 | if (iopgd_is_table(*iopgd)) | ||
678 | iopte = iopte_offset(iopgd, da); | ||
679 | out: | ||
680 | *ppgd = iopgd; | ||
681 | *ppte = iopte; | ||
682 | } | ||
683 | EXPORT_SYMBOL_GPL(iopgtable_lookup_entry); | ||
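With the returned pointers a caller can resolve a device address to its physical counterpart. A sketch for the two common cases; IOPAGE_MASK appears above, while IOSECTION_MASK is assumed from plat/iopgtable.h (large pages and supersections would use their wider masks):

    u32 *pgd, *pte, pa = 0;

    iopgtable_lookup_entry(obj, da, &pgd, &pte);
    if (pte && *pte)        /* second-level hit: small page */
            pa = (*pte & IOPAGE_MASK) | (da & ~IOPAGE_MASK);
    else if (pgd && *pgd)   /* first-level hit: section */
            pa = (*pgd & IOSECTION_MASK) | (da & ~IOSECTION_MASK);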
684 | |||
685 | static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da) | ||
686 | { | ||
687 | size_t bytes; | ||
688 | u32 *iopgd = iopgd_offset(obj, da); | ||
689 | int nent = 1; | ||
690 | |||
691 | if (!*iopgd) | ||
692 | return 0; | ||
693 | |||
694 | if (iopgd_is_table(*iopgd)) { | ||
695 | int i; | ||
696 | u32 *iopte = iopte_offset(iopgd, da); | ||
697 | |||
698 | bytes = IOPTE_SIZE; | ||
699 | if (*iopte & IOPTE_LARGE) { | ||
700 | nent *= 16; | ||
701 | /* rewind to the 1st entry */ | ||
702 | iopte = iopte_offset(iopgd, (da & IOLARGE_MASK)); | ||
703 | } | ||
704 | bytes *= nent; | ||
705 | memset(iopte, 0, nent * sizeof(*iopte)); | ||
706 | flush_iopte_range(iopte, iopte + nent - 1); | ||
707 | |||
708 | /* | ||
709 | * walk the table to check whether any valid entries remain | ||
710 | */ | ||
711 | iopte = iopte_offset(iopgd, 0); | ||
712 | for (i = 0; i < PTRS_PER_IOPTE; i++) | ||
713 | if (iopte[i]) | ||
714 | goto out; | ||
715 | |||
716 | iopte_free(iopte); | ||
717 | nent = 1; /* for the next L1 entry */ | ||
718 | } else { | ||
719 | bytes = IOPGD_SIZE; | ||
720 | if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) { | ||
721 | nent *= 16; | ||
722 | /* rewind to the 1st entry */ | ||
723 | iopgd = iopgd_offset(obj, (da & IOSUPER_MASK)); | ||
724 | } | ||
725 | bytes *= nent; | ||
726 | } | ||
727 | memset(iopgd, 0, nent * sizeof(*iopgd)); | ||
728 | flush_iopgd_range(iopgd, iopgd + nent - 1); | ||
729 | out: | ||
730 | return bytes; | ||
731 | } | ||
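The rewind above is what makes clearing safe for the replicated formats: a da pointing into the middle of a 64KB large page (or a 16MB supersection) is masked back to the first of its 16 descriptors, so all of them get wiped and the returned byte count covers the whole mapping. A worked host-side sketch:

    #include <assert.h>
    #include <stdint.h>

    #define IOLARGE_MASK 0xffff0000u /* illustrative 64KB mask */

    int main(void)
    {
            uint32_t da = 0x10008000; /* 32KB into a 64KB large page */

            /* rewind to the first of the 16 replicated L2 entries */
            assert((da & IOLARGE_MASK) == 0x10000000);
            return 0;
    }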
732 | |||
733 | /** | ||
734 | * iopgtable_clear_entry - Remove an iommu pte entry | ||
735 | * @obj: target iommu | ||
736 | * @da: iommu device virtual address | ||
737 | **/ | ||
738 | size_t iopgtable_clear_entry(struct iommu *obj, u32 da) | ||
739 | { | ||
740 | size_t bytes; | ||
741 | |||
742 | spin_lock(&obj->page_table_lock); | ||
743 | |||
744 | bytes = iopgtable_clear_entry_core(obj, da); | ||
745 | flush_iotlb_page(obj, da); | ||
746 | |||
747 | spin_unlock(&obj->page_table_lock); | ||
748 | |||
749 | return bytes; | ||
750 | } | ||
751 | EXPORT_SYMBOL_GPL(iopgtable_clear_entry); | ||
752 | |||
753 | static void iopgtable_clear_entry_all(struct iommu *obj) | ||
754 | { | ||
755 | int i; | ||
756 | |||
757 | spin_lock(&obj->page_table_lock); | ||
758 | |||
759 | for (i = 0; i < PTRS_PER_IOPGD; i++) { | ||
760 | u32 da; | ||
761 | u32 *iopgd; | ||
762 | |||
763 | da = i << IOPGD_SHIFT; | ||
764 | iopgd = iopgd_offset(obj, da); | ||
765 | |||
766 | if (!*iopgd) | ||
767 | continue; | ||
768 | |||
769 | if (iopgd_is_table(*iopgd)) | ||
770 | iopte_free(iopte_offset(iopgd, 0)); | ||
771 | |||
772 | *iopgd = 0; | ||
773 | flush_iopgd_range(iopgd, iopgd); | ||
774 | } | ||
775 | |||
776 | flush_iotlb_all(obj); | ||
777 | |||
778 | spin_unlock(&obj->page_table_lock); | ||
779 | } | ||
780 | |||
781 | /* | ||
782 | * Device IOMMU generic operations | ||
783 | */ | ||
784 | static irqreturn_t iommu_fault_handler(int irq, void *data) | ||
785 | { | ||
786 | u32 da, errs; | ||
787 | u32 *iopgd, *iopte; | ||
788 | struct iommu *obj = data; | ||
789 | |||
790 | if (!obj->refcount) | ||
791 | return IRQ_NONE; | ||
792 | |||
793 | clk_enable(obj->clk); | ||
794 | errs = iommu_report_fault(obj, &da); | ||
795 | clk_disable(obj->clk); | ||
796 | if (errs == 0) | ||
797 | return IRQ_HANDLED; | ||
798 | |||
799 | /* Fault callback or TLB/PTE Dynamic loading */ | ||
800 | if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv)) | ||
801 | return IRQ_HANDLED; | ||
802 | |||
803 | iommu_disable(obj); | ||
804 | |||
805 | iopgd = iopgd_offset(obj, da); | ||
806 | |||
807 | if (!iopgd_is_table(*iopgd)) { | ||
808 | dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p " | ||
809 | "*pgd:px%08x\n", obj->name, errs, da, iopgd, *iopgd); | ||
810 | return IRQ_NONE; | ||
811 | } | ||
812 | |||
813 | iopte = iopte_offset(iopgd, da); | ||
814 | |||
815 | dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x " | ||
816 | "pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd, | ||
817 | iopte, *iopte); | ||
818 | |||
819 | return IRQ_NONE; | ||
820 | } | ||
821 | |||
822 | static int device_match_by_alias(struct device *dev, void *data) | ||
823 | { | ||
824 | struct iommu *obj = to_iommu(dev); | ||
825 | const char *name = data; | ||
826 | |||
827 | pr_debug("%s: %s %s\n", __func__, obj->name, name); | ||
828 | |||
829 | return strcmp(obj->name, name) == 0; | ||
830 | } | ||
831 | |||
832 | /** | ||
833 | * iommu_set_da_range - Set a valid device address range | ||
834 | * @obj: target iommu | ||
835 | * @start: Start of valid range | ||
836 | * @end: End of valid range | ||
837 | **/ | ||
838 | int iommu_set_da_range(struct iommu *obj, u32 start, u32 end) | ||
839 | { | ||
840 | |||
841 | if (!obj) | ||
842 | return -EFAULT; | ||
843 | |||
844 | if (end < start || !IS_ALIGNED(start | end, PAGE_SIZE)) | ||
845 | return -EINVAL; | ||
846 | |||
847 | obj->da_start = start; | ||
848 | obj->da_end = end; | ||
849 | |||
850 | return 0; | ||
851 | } | ||
852 | EXPORT_SYMBOL_GPL(iommu_set_da_range); | ||
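A usage sketch; both bounds must be page aligned and ordered, and the window otherwise defaults to the da_start/da_end taken from platform data at probe time (the values below are illustrative):

    /* restrict device-visible allocations to [1MB, 256MB) */
    err = iommu_set_da_range(obj, SZ_1M, SZ_256M);
    if (err)
            dev_err(obj->dev, "invalid da range\n");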
853 | |||
854 | /** | ||
855 | * iommu_get - Get iommu handler | ||
856 | * @name: target iommu name | ||
857 | **/ | ||
858 | struct iommu *iommu_get(const char *name) | ||
859 | { | ||
860 | int err = -ENOMEM; | ||
861 | struct device *dev; | ||
862 | struct iommu *obj; | ||
863 | |||
864 | dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name, | ||
865 | device_match_by_alias); | ||
866 | if (!dev) | ||
867 | return ERR_PTR(-ENODEV); | ||
868 | |||
869 | obj = to_iommu(dev); | ||
870 | |||
871 | mutex_lock(&obj->iommu_lock); | ||
872 | |||
873 | if (obj->refcount++ == 0) { | ||
874 | err = iommu_enable(obj); | ||
875 | if (err) | ||
876 | goto err_enable; | ||
877 | flush_iotlb_all(obj); | ||
878 | } | ||
879 | |||
880 | if (!try_module_get(obj->owner)) | ||
881 | goto err_module; | ||
882 | |||
883 | mutex_unlock(&obj->iommu_lock); | ||
884 | |||
885 | dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); | ||
886 | return obj; | ||
887 | |||
888 | err_module: | ||
889 | if (obj->refcount == 1) | ||
890 | iommu_disable(obj); | ||
891 | err_enable: | ||
892 | obj->refcount--; | ||
893 | mutex_unlock(&obj->iommu_lock); | ||
894 | return ERR_PTR(err); | ||
895 | } | ||
896 | EXPORT_SYMBOL_GPL(iommu_get); | ||
897 | |||
898 | /** | ||
899 | * iommu_put - Put back iommu handler | ||
900 | * @obj: target iommu | ||
901 | **/ | ||
902 | void iommu_put(struct iommu *obj) | ||
903 | { | ||
904 | if (!obj || IS_ERR(obj)) | ||
905 | return; | ||
906 | |||
907 | mutex_lock(&obj->iommu_lock); | ||
908 | |||
909 | if (--obj->refcount == 0) | ||
910 | iommu_disable(obj); | ||
911 | |||
912 | module_put(obj->owner); | ||
913 | |||
914 | mutex_unlock(&obj->iommu_lock); | ||
915 | |||
916 | dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); | ||
917 | } | ||
918 | EXPORT_SYMBOL_GPL(iommu_put); | ||
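These two calls bracket every user of the MMU: the first iommu_get() enables the hardware and flushes the TLB, and the last iommu_put() disables it again. A minimal lifecycle sketch; the "isp" alias is illustrative, the real name comes from platform data:

    struct iommu *obj;

    obj = iommu_get("isp");
    if (IS_ERR(obj))
            return PTR_ERR(obj);
    /* ... map and use device-visible buffers against obj ... */
    iommu_put(obj); /* the last put disables the MMU */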
919 | |||
920 | int iommu_set_isr(const char *name, | ||
921 | int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs, | ||
922 | void *priv), | ||
923 | void *isr_priv) | ||
924 | { | ||
925 | struct device *dev; | ||
926 | struct iommu *obj; | ||
927 | |||
928 | dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name, | ||
929 | device_match_by_alias); | ||
930 | if (!dev) | ||
931 | return -ENODEV; | ||
932 | |||
933 | obj = to_iommu(dev); | ||
934 | mutex_lock(&obj->iommu_lock); | ||
935 | if (obj->refcount != 0) { | ||
936 | mutex_unlock(&obj->iommu_lock); | ||
937 | return -EBUSY; | ||
938 | } | ||
939 | obj->isr = isr; | ||
940 | obj->isr_priv = isr_priv; | ||
941 | mutex_unlock(&obj->iommu_lock); | ||
942 | |||
943 | return 0; | ||
944 | } | ||
945 | EXPORT_SYMBOL_GPL(iommu_set_isr); | ||
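A registration sketch. Per the convention in iommu_fault_handler() above, an isr returning zero claims the fault and suppresses the default page-table dump; and since iommu_set_isr() refuses with -EBUSY once the iommu is in use, the handler must be installed before iommu_get(). The "isp" alias is again illustrative:

    static int my_isr(struct iommu *obj, u32 da, u32 iommu_errs, void *priv)
    {
            dev_err(obj->dev, "fault: da %08x errs %08x\n", da, iommu_errs);
            return 0; /* zero: handled, skip the default dump */
    }

    err = iommu_set_isr("isp", my_isr, NULL);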
946 | |||
947 | /* | ||
948 | * OMAP Device MMU(IOMMU) detection | ||
949 | */ | ||
950 | static int __devinit omap_iommu_probe(struct platform_device *pdev) | ||
951 | { | ||
952 | int err = -ENODEV; | ||
953 | void *p; | ||
954 | int irq; | ||
955 | struct iommu *obj; | ||
956 | struct resource *res; | ||
957 | struct iommu_platform_data *pdata = pdev->dev.platform_data; | ||
958 | |||
959 | if (pdev->num_resources != 2) | ||
960 | return -EINVAL; | ||
961 | |||
962 | obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); | ||
963 | if (!obj) | ||
964 | return -ENOMEM; | ||
965 | |||
966 | obj->clk = clk_get(&pdev->dev, pdata->clk_name); | ||
967 | if (IS_ERR(obj->clk)) | ||
968 | goto err_clk; | ||
969 | |||
970 | obj->nr_tlb_entries = pdata->nr_tlb_entries; | ||
971 | obj->name = pdata->name; | ||
972 | obj->dev = &pdev->dev; | ||
973 | obj->ctx = (void *)obj + sizeof(*obj); | ||
974 | obj->da_start = pdata->da_start; | ||
975 | obj->da_end = pdata->da_end; | ||
976 | |||
977 | mutex_init(&obj->iommu_lock); | ||
978 | mutex_init(&obj->mmap_lock); | ||
979 | spin_lock_init(&obj->page_table_lock); | ||
980 | INIT_LIST_HEAD(&obj->mmap); | ||
981 | |||
982 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
983 | if (!res) { | ||
984 | err = -ENODEV; | ||
985 | goto err_mem; | ||
986 | } | ||
987 | |||
988 | res = request_mem_region(res->start, resource_size(res), | ||
989 | dev_name(&pdev->dev)); | ||
990 | if (!res) { | ||
991 | err = -EIO; | ||
992 | goto err_mem; | ||
993 | } | ||
994 | |||
995 | obj->regbase = ioremap(res->start, resource_size(res)); | ||
996 | if (!obj->regbase) { | ||
997 | err = -ENOMEM; | ||
998 | goto err_ioremap; | ||
999 | } | ||
1000 | |||
1001 | irq = platform_get_irq(pdev, 0); | ||
1002 | if (irq < 0) { | ||
1003 | err = -ENODEV; | ||
1004 | goto err_irq; | ||
1005 | } | ||
1006 | err = request_irq(irq, iommu_fault_handler, IRQF_SHARED, | ||
1007 | dev_name(&pdev->dev), obj); | ||
1008 | if (err < 0) | ||
1009 | goto err_irq; | ||
1010 | platform_set_drvdata(pdev, obj); | ||
1011 | |||
1012 | p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE)); | ||
1013 | if (!p) { | ||
1014 | err = -ENOMEM; | ||
1015 | goto err_pgd; | ||
1016 | } | ||
1017 | memset(p, 0, IOPGD_TABLE_SIZE); | ||
1018 | clean_dcache_area(p, IOPGD_TABLE_SIZE); | ||
1019 | obj->iopgd = p; | ||
1020 | |||
1021 | BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE)); | ||
1022 | |||
1023 | dev_info(&pdev->dev, "%s registered\n", obj->name); | ||
1024 | return 0; | ||
1025 | |||
1026 | err_pgd: | ||
1027 | free_irq(irq, obj); | ||
1028 | err_irq: | ||
1029 | iounmap(obj->regbase); | ||
1030 | err_ioremap: | ||
1031 | release_mem_region(res->start, resource_size(res)); | ||
1032 | err_mem: | ||
1033 | clk_put(obj->clk); | ||
1034 | err_clk: | ||
1035 | kfree(obj); | ||
1036 | return err; | ||
1037 | } | ||
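The error path above is the usual kernel unwind ladder: resources are released in exact reverse order of acquisition, and each label undoes only what had already succeeded before the jump. A generic sketch of the idiom, with hypothetical acquire/release helpers:

    err = acquire_a(); /* hypothetical helpers */
    if (err)
            goto err_a;
    err = acquire_b();
    if (err)
            goto err_b;
    return 0;

    err_b:
            release_a(); /* undo in reverse order */
    err_a:
            return err;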
1038 | |||
1039 | static int __devexit omap_iommu_remove(struct platform_device *pdev) | ||
1040 | { | ||
1041 | int irq; | ||
1042 | struct resource *res; | ||
1043 | struct iommu *obj = platform_get_drvdata(pdev); | ||
1044 | |||
1045 | platform_set_drvdata(pdev, NULL); | ||
1046 | |||
1047 | iopgtable_clear_entry_all(obj); | ||
1048 | free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE)); | ||
1049 | |||
1050 | irq = platform_get_irq(pdev, 0); | ||
1051 | free_irq(irq, obj); | ||
1052 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1053 | release_mem_region(res->start, resource_size(res)); | ||
1054 | iounmap(obj->regbase); | ||
1055 | |||
1056 | clk_put(obj->clk); | ||
1057 | dev_info(&pdev->dev, "%s removed\n", obj->name); | ||
1058 | kfree(obj); | ||
1059 | return 0; | ||
1060 | } | ||
1061 | |||
1062 | static struct platform_driver omap_iommu_driver = { | ||
1063 | .probe = omap_iommu_probe, | ||
1064 | .remove = __devexit_p(omap_iommu_remove), | ||
1065 | .driver = { | ||
1066 | .name = "omap-iommu", | ||
1067 | }, | ||
1068 | }; | ||
1069 | |||
1070 | static void iopte_cachep_ctor(void *iopte) | ||
1071 | { | ||
1072 | clean_dcache_area(iopte, IOPTE_TABLE_SIZE); | ||
1073 | } | ||
1074 | |||
1075 | static int __init omap_iommu_init(void) | ||
1076 | { | ||
1077 | struct kmem_cache *p; | ||
1078 | const unsigned long flags = SLAB_HWCACHE_ALIGN; | ||
1079 | size_t align = 1 << 10; /* L2 pagetable alignment */ | ||
1080 | |||
1081 | p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, | ||
1082 | iopte_cachep_ctor); | ||
1083 | if (!p) | ||
1084 | return -ENOMEM; | ||
1085 | iopte_cachep = p; | ||
1086 | |||
1087 | return platform_driver_register(&omap_iommu_driver); | ||
1088 | } | ||
1089 | module_init(omap_iommu_init); | ||
1090 | |||
1091 | static void __exit omap_iommu_exit(void) | ||
1092 | { | ||
1093 | kmem_cache_destroy(iopte_cachep); | ||
1094 | |||
1095 | platform_driver_unregister(&omap_iommu_driver); | ||
1096 | } | ||
1097 | module_exit(omap_iommu_exit); | ||
1098 | |||
1099 | MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives"); | ||
1100 | MODULE_ALIAS("platform:omap-iommu"); | ||
1101 | MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi"); | ||
1102 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c deleted file mode 100644 index 79e7fedb8602..000000000000 --- a/arch/arm/plat-omap/iovmm.c +++ /dev/null | |||
@@ -1,904 +0,0 @@ | |||
1 | /* | ||
2 | * omap iommu: simple virtual address space management | ||
3 | * | ||
4 | * Copyright (C) 2008-2009 Nokia Corporation | ||
5 | * | ||
6 | * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/err.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/vmalloc.h> | ||
16 | #include <linux/device.h> | ||
17 | #include <linux/scatterlist.h> | ||
18 | |||
19 | #include <asm/cacheflush.h> | ||
20 | #include <asm/mach/map.h> | ||
21 | |||
22 | #include <plat/iommu.h> | ||
23 | #include <plat/iovmm.h> | ||
24 | |||
25 | #include "iopgtable.h" | ||
26 | |||
27 | /* | ||
28 | * A device driver needs to create address mappings between: | ||
29 | * | ||
30 | * - iommu/device address | ||
31 | * - physical address | ||
32 | * - mpu virtual address | ||
33 | * | ||
34 | * There are 4 possible patterns for them: | ||
35 | * | ||
36 | *      |iova/                    mapping          iommu_                  page | ||
37 | *      | da     pa      va      (d)-(p)-(v)      function                type | ||
38 | * --------------------------------------------------------------------------- | ||
39 | *  1 | c       c       c       1 - 1 - 1        _kmap() / _kunmap()     s | ||
40 | *  2 | c       c,a     c       1 - 1 - 1        _kmalloc()/ _kfree()    s | ||
41 | *  3 | c       d       c       1 - n - 1        _vmap() / _vunmap()     s | ||
42 | *  4 | c       d,a     c       1 - n - 1        _vmalloc()/ _vfree()    n* | ||
43 | * | ||
44 | * | ||
45 | * 'iova': device iommu virtual address | ||
46 | * 'da': alias of 'iova' | ||
47 | * 'pa': physical address | ||
48 | * 'va': mpu virtual address | ||
49 | * | ||
50 | * 'c': contiguous memory area | ||
51 | * 'd': discontiguous memory area | ||
52 | * 'a': anonymous memory allocation | ||
53 | * '()': optional feature | ||
54 | * | ||
55 | * 'n': a normal page(4KB) size is used. | ||
56 | * 's': multiple iommu superpage(16MB, 1MB, 64KB, 4KB) size is used. | ||
57 | * | ||
58 | * '*': not yet, but feasible. | ||
59 | */ | ||
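In API terms the four patterns correspond to the four exported mapper pairs defined below; when IOVMF_DA_FIXED is not set, passing da = 0 lets alloc_iovm_area() choose the device address. A quick orientation sketch (flags and sizes illustrative):

    da = iommu_kmap(obj, 0, pa, SZ_64K, 0); /* 1: existing contiguous pa */
    da = iommu_kmalloc(obj, 0, SZ_64K, 0);  /* 2: kmalloc-backed, 1-1-1  */
    da = iommu_vmap(obj, 0, sgt, 0);        /* 3: caller-provided sgt    */
    da = iommu_vmalloc(obj, 0, SZ_1M, 0);   /* 4: vmalloc-backed, 1-n-1  */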
60 | |||
61 | static struct kmem_cache *iovm_area_cachep; | ||
62 | |||
63 | /* return total bytes of sg buffers */ | ||
64 | static size_t sgtable_len(const struct sg_table *sgt) | ||
65 | { | ||
66 | unsigned int i, total = 0; | ||
67 | struct scatterlist *sg; | ||
68 | |||
69 | if (!sgt) | ||
70 | return 0; | ||
71 | |||
72 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
73 | size_t bytes; | ||
74 | |||
75 | bytes = sg->length; | ||
76 | |||
77 | if (!iopgsz_ok(bytes)) { | ||
78 | pr_err("%s: sg[%d] not iommu pagesize(%x)\n", | ||
79 | __func__, i, bytes); | ||
80 | return 0; | ||
81 | } | ||
82 | |||
83 | total += bytes; | ||
84 | } | ||
85 | |||
86 | return total; | ||
87 | } | ||
88 | #define sgtable_ok(x) (!!sgtable_len(x)) | ||
89 | |||
90 | static unsigned max_alignment(u32 addr) | ||
91 | { | ||
92 | int i; | ||
93 | unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, }; | ||
94 | for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++) | ||
95 | ; | ||
96 | return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0; | ||
97 | } | ||
98 | |||
99 | /* | ||
100 | * calculate the optimal number of sg elements from total bytes based on | ||
101 | * iommu superpages | ||
102 | */ | ||
103 | static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa) | ||
104 | { | ||
105 | unsigned nr_entries = 0, ent_sz; | ||
106 | |||
107 | if (!IS_ALIGNED(bytes, PAGE_SIZE)) { | ||
108 | pr_err("%s: wrong size %08x\n", __func__, bytes); | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | while (bytes) { | ||
113 | ent_sz = max_alignment(da | pa); | ||
114 | ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes)); | ||
115 | nr_entries++; | ||
116 | da += ent_sz; | ||
117 | pa += ent_sz; | ||
118 | bytes -= ent_sz; | ||
119 | } | ||
120 | |||
121 | return nr_entries; | ||
122 | } | ||
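Together these two helpers implement greedy chunking: every step takes the largest iommu page size both addresses are aligned to, capped by the bytes still remaining (iopgsz_max() in the real code). A self-contained sketch with a worked example; largest_pgsz_le() is a stand-in for iopgsz_max():

    #include <assert.h>
    #include <stdint.h>

    static const uint32_t pgsz[] = { 1u << 24, 1u << 20, 1u << 16, 1u << 12 };

    static uint32_t max_alignment(uint32_t addr)
    {
            unsigned i;
            for (i = 0; i < 4 && (addr & (pgsz[i] - 1)); i++)
                    ;
            return i < 4 ? pgsz[i] : 0;
    }

    static uint32_t largest_pgsz_le(uint32_t bytes) /* stand-in for iopgsz_max() */
    {
            for (unsigned i = 0; i < 4; i++)
                    if (bytes >= pgsz[i])
                            return pgsz[i];
            return 0;
    }

    /* bytes, da and pa must be 4KB aligned, as the caller checks above */
    static unsigned nents(uint32_t bytes, uint32_t da, uint32_t pa)
    {
            unsigned n = 0;

            while (bytes) {
                    uint32_t ent = max_alignment(da | pa);

                    if (ent > largest_pgsz_le(bytes))
                            ent = largest_pgsz_le(bytes);
                    n++;
                    da += ent;
                    pa += ent;
                    bytes -= ent;
            }
            return n;
    }

    int main(void)
    {
            /* 1MB + 64KB at 1MB-aligned addresses: one 1MB + one 64KB entry */
            assert(nents(0x110000, 0x00100000, 0x80100000) == 2);
            return 0;
    }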
123 | |||
124 | /* allocate and initialize sg_table header(a kind of 'superblock') */ | ||
125 | static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags, | ||
126 | u32 da, u32 pa) | ||
127 | { | ||
128 | unsigned int nr_entries; | ||
129 | int err; | ||
130 | struct sg_table *sgt; | ||
131 | |||
132 | if (!bytes) | ||
133 | return ERR_PTR(-EINVAL); | ||
134 | |||
135 | if (!IS_ALIGNED(bytes, PAGE_SIZE)) | ||
136 | return ERR_PTR(-EINVAL); | ||
137 | |||
138 | if (flags & IOVMF_LINEAR) { | ||
139 | nr_entries = sgtable_nents(bytes, da, pa); | ||
140 | if (!nr_entries) | ||
141 | return ERR_PTR(-EINVAL); | ||
142 | } else | ||
143 | nr_entries = bytes / PAGE_SIZE; | ||
144 | |||
145 | sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); | ||
146 | if (!sgt) | ||
147 | return ERR_PTR(-ENOMEM); | ||
148 | |||
149 | err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL); | ||
150 | if (err) { | ||
151 | kfree(sgt); | ||
152 | return ERR_PTR(err); | ||
153 | } | ||
154 | |||
155 | pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries); | ||
156 | |||
157 | return sgt; | ||
158 | } | ||
159 | |||
160 | /* free sg_table header(a kind of superblock) */ | ||
161 | static void sgtable_free(struct sg_table *sgt) | ||
162 | { | ||
163 | if (!sgt) | ||
164 | return; | ||
165 | |||
166 | sg_free_table(sgt); | ||
167 | kfree(sgt); | ||
168 | |||
169 | pr_debug("%s: sgt:%p\n", __func__, sgt); | ||
170 | } | ||
171 | |||
172 | /* map 'sglist' to a contiguous mpu virtual area and return 'va' */ | ||
173 | static void *vmap_sg(const struct sg_table *sgt) | ||
174 | { | ||
175 | u32 va; | ||
176 | size_t total; | ||
177 | unsigned int i; | ||
178 | struct scatterlist *sg; | ||
179 | struct vm_struct *new; | ||
180 | const struct mem_type *mtype; | ||
181 | |||
182 | mtype = get_mem_type(MT_DEVICE); | ||
183 | if (!mtype) | ||
184 | return ERR_PTR(-EINVAL); | ||
185 | |||
186 | total = sgtable_len(sgt); | ||
187 | if (!total) | ||
188 | return ERR_PTR(-EINVAL); | ||
189 | |||
190 | new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END); | ||
191 | if (!new) | ||
192 | return ERR_PTR(-ENOMEM); | ||
193 | va = (u32)new->addr; | ||
194 | |||
195 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
196 | size_t bytes; | ||
197 | u32 pa; | ||
198 | int err; | ||
199 | |||
200 | pa = sg_phys(sg); | ||
201 | bytes = sg->length; | ||
202 | |||
203 | BUG_ON(bytes != PAGE_SIZE); | ||
204 | |||
205 | err = ioremap_page(va, pa, mtype); | ||
206 | if (err) | ||
207 | goto err_out; | ||
208 | |||
209 | va += bytes; | ||
210 | } | ||
211 | |||
212 | flush_cache_vmap((unsigned long)new->addr, | ||
213 | (unsigned long)(new->addr + total)); | ||
214 | return new->addr; | ||
215 | |||
216 | err_out: | ||
217 | WARN_ON(1); /* FIXME: cleanup some mpu mappings */ | ||
218 | vunmap(new->addr); | ||
219 | return ERR_PTR(-EAGAIN); | ||
220 | } | ||
221 | |||
222 | static inline void vunmap_sg(const void *va) | ||
223 | { | ||
224 | vunmap(va); | ||
225 | } | ||
226 | |||
227 | static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da) | ||
228 | { | ||
229 | struct iovm_struct *tmp; | ||
230 | |||
231 | list_for_each_entry(tmp, &obj->mmap, list) { | ||
232 | if ((da >= tmp->da_start) && (da < tmp->da_end)) { | ||
233 | size_t len; | ||
234 | |||
235 | len = tmp->da_end - tmp->da_start; | ||
236 | |||
237 | dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", | ||
238 | __func__, tmp->da_start, da, tmp->da_end, len, | ||
239 | tmp->flags); | ||
240 | |||
241 | return tmp; | ||
242 | } | ||
243 | } | ||
244 | |||
245 | return NULL; | ||
246 | } | ||
247 | |||
248 | /** | ||
249 | * find_iovm_area - find iovma which includes @da | ||
250 | * @da: iommu device virtual address | ||
251 | * | ||
252 | * Find the existing iovma starting at @da | ||
253 | */ | ||
254 | struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da) | ||
255 | { | ||
256 | struct iovm_struct *area; | ||
257 | |||
258 | mutex_lock(&obj->mmap_lock); | ||
259 | area = __find_iovm_area(obj, da); | ||
260 | mutex_unlock(&obj->mmap_lock); | ||
261 | |||
262 | return area; | ||
263 | } | ||
264 | EXPORT_SYMBOL_GPL(find_iovm_area); | ||
265 | |||
266 | /* | ||
267 | * This finds the hole(area) which fits the requested address and len | ||
268 | * in iovmas mmap, and returns the newly allocated iovma. | ||
269 | */ | ||
270 | static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da, | ||
271 | size_t bytes, u32 flags) | ||
272 | { | ||
273 | struct iovm_struct *new, *tmp; | ||
274 | u32 start, prev_end, alignment; | ||
275 | |||
276 | if (!obj || !bytes) | ||
277 | return ERR_PTR(-EINVAL); | ||
278 | |||
279 | start = da; | ||
280 | alignment = PAGE_SIZE; | ||
281 | |||
282 | if (~flags & IOVMF_DA_FIXED) { | ||
283 | /* Don't map address 0 */ | ||
284 | start = obj->da_start ? obj->da_start : alignment; | ||
285 | |||
286 | if (flags & IOVMF_LINEAR) | ||
287 | alignment = iopgsz_max(bytes); | ||
288 | start = roundup(start, alignment); | ||
289 | } else if (start < obj->da_start || start > obj->da_end || | ||
290 | obj->da_end - start < bytes) { | ||
291 | return ERR_PTR(-EINVAL); | ||
292 | } | ||
293 | |||
294 | tmp = NULL; | ||
295 | if (list_empty(&obj->mmap)) | ||
296 | goto found; | ||
297 | |||
298 | prev_end = 0; | ||
299 | list_for_each_entry(tmp, &obj->mmap, list) { | ||
300 | |||
301 | if (prev_end > start) | ||
302 | break; | ||
303 | |||
304 | if (tmp->da_start > start && (tmp->da_start - start) >= bytes) | ||
305 | goto found; | ||
306 | |||
307 | if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED) | ||
308 | start = roundup(tmp->da_end + 1, alignment); | ||
309 | |||
310 | prev_end = tmp->da_end; | ||
311 | } | ||
312 | |||
313 | if ((start >= prev_end) && (obj->da_end - start >= bytes)) | ||
314 | goto found; | ||
315 | |||
316 | dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n", | ||
317 | __func__, da, bytes, flags); | ||
318 | |||
319 | return ERR_PTR(-EINVAL); | ||
320 | |||
321 | found: | ||
322 | new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL); | ||
323 | if (!new) | ||
324 | return ERR_PTR(-ENOMEM); | ||
325 | |||
326 | new->iommu = obj; | ||
327 | new->da_start = start; | ||
328 | new->da_end = start + bytes; | ||
329 | new->flags = flags; | ||
330 | |||
331 | /* | ||
332 | * keep ascending order of iovmas | ||
333 | */ | ||
334 | if (tmp) | ||
335 | list_add_tail(&new->list, &tmp->list); | ||
336 | else | ||
337 | list_add(&new->list, &obj->mmap); | ||
338 | |||
339 | dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n", | ||
340 | __func__, new->da_start, start, new->da_end, bytes, flags); | ||
341 | |||
342 | return new; | ||
343 | } | ||
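The allocator is a first-fit scan over the address-sorted iovma list: the candidate start is bumped past each existing area (rounded up to the alignment) until a hole large enough opens, with the tail of the da window as the fallback. A simplified host-side sketch that ignores the alignment rounding:

    #include <assert.h>
    #include <stdint.h>

    struct area { uint32_t start, end; }; /* sorted by start, non-overlapping */

    /* first-fit: find a hole of 'bytes' at or after 'start' (simplified) */
    static uint32_t fit(const struct area *a, int n, uint32_t start,
                        uint32_t bytes, uint32_t limit)
    {
            for (int i = 0; i < n; i++) {
                    if (a[i].start > start && a[i].start - start >= bytes)
                            return start;         /* hole before this area */
                    if (a[i].end >= start)
                            start = a[i].end + 1; /* skip past the area */
            }
            return limit - start >= bytes ? start : 0; /* tail hole */
    }

    int main(void)
    {
            struct area m[] = { { 0x1000, 0x4fff }, { 0x8000, 0x8fff } };

            assert(fit(m, 2, 0x1000, 0x3000, 0x100000) == 0x5000);
            return 0;
    }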
344 | |||
345 | static void free_iovm_area(struct iommu *obj, struct iovm_struct *area) | ||
346 | { | ||
347 | size_t bytes; | ||
348 | |||
349 | BUG_ON(!obj || !area); | ||
350 | |||
351 | bytes = area->da_end - area->da_start; | ||
352 | |||
353 | dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n", | ||
354 | __func__, area->da_start, area->da_end, bytes, area->flags); | ||
355 | |||
356 | list_del(&area->list); | ||
357 | kmem_cache_free(iovm_area_cachep, area); | ||
358 | } | ||
359 | |||
360 | /** | ||
361 | * da_to_va - convert (d) to (v) | ||
362 | * @obj: objective iommu | ||
363 | * @da: iommu device virtual address | ||
364 | * @va: mpu virtual address | ||
365 | * | ||
366 | * Returns mpu virtual addr which corresponds to a given device virtual addr | ||
367 | */ | ||
368 | void *da_to_va(struct iommu *obj, u32 da) | ||
369 | { | ||
370 | void *va = NULL; | ||
371 | struct iovm_struct *area; | ||
372 | |||
373 | mutex_lock(&obj->mmap_lock); | ||
374 | |||
375 | area = __find_iovm_area(obj, da); | ||
376 | if (!area) { | ||
377 | dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); | ||
378 | goto out; | ||
379 | } | ||
380 | va = area->va; | ||
381 | out: | ||
382 | mutex_unlock(&obj->mmap_lock); | ||
383 | |||
384 | return va; | ||
385 | } | ||
386 | EXPORT_SYMBOL_GPL(da_to_va); | ||
387 | |||
388 | static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va) | ||
389 | { | ||
390 | unsigned int i; | ||
391 | struct scatterlist *sg; | ||
392 | void *va = _va; | ||
393 | void *va_end; | ||
394 | |||
395 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
396 | struct page *pg; | ||
397 | const size_t bytes = PAGE_SIZE; | ||
398 | |||
399 | /* | ||
400 | * iommu 'superpage' isn't supported with 'iommu_vmalloc()' | ||
401 | */ | ||
402 | pg = vmalloc_to_page(va); | ||
403 | BUG_ON(!pg); | ||
404 | sg_set_page(sg, pg, bytes, 0); | ||
405 | |||
406 | va += bytes; | ||
407 | } | ||
408 | |||
409 | va_end = _va + PAGE_SIZE * i; | ||
410 | } | ||
411 | |||
412 | static inline void sgtable_drain_vmalloc(struct sg_table *sgt) | ||
413 | { | ||
414 | /* | ||
415 | * Actually this is not necessary at all; it exists only for | ||
416 | * consistency and readability of the code. | ||
417 | */ | ||
418 | BUG_ON(!sgt); | ||
419 | } | ||
420 | |||
421 | static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da, | ||
422 | size_t len) | ||
423 | { | ||
424 | unsigned int i; | ||
425 | struct scatterlist *sg; | ||
426 | |||
427 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
428 | unsigned bytes; | ||
429 | |||
430 | bytes = max_alignment(da | pa); | ||
431 | bytes = min_t(unsigned, bytes, iopgsz_max(len)); | ||
432 | |||
433 | BUG_ON(!iopgsz_ok(bytes)); | ||
434 | |||
435 | sg_set_buf(sg, phys_to_virt(pa), bytes); | ||
436 | /* | ||
437 | * 'pa' is continuous (linear). | ||
438 | */ | ||
439 | pa += bytes; | ||
440 | da += bytes; | ||
441 | len -= bytes; | ||
442 | } | ||
443 | BUG_ON(len); | ||
444 | } | ||
445 | |||
446 | static inline void sgtable_drain_kmalloc(struct sg_table *sgt) | ||
447 | { | ||
448 | /* | ||
449 | * Actually this is not necessary at all; it exists only for | ||
450 | * consistency and readability of the code | ||
451 | */ | ||
452 | BUG_ON(!sgt); | ||
453 | } | ||
454 | |||
455 | /* create 'da' <-> 'pa' mapping from 'sgt' */ | ||
456 | static int map_iovm_area(struct iommu *obj, struct iovm_struct *new, | ||
457 | const struct sg_table *sgt, u32 flags) | ||
458 | { | ||
459 | int err; | ||
460 | unsigned int i, j; | ||
461 | struct scatterlist *sg; | ||
462 | u32 da = new->da_start; | ||
463 | |||
464 | if (!obj || !sgt) | ||
465 | return -EINVAL; | ||
466 | |||
467 | BUG_ON(!sgtable_ok(sgt)); | ||
468 | |||
469 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
470 | u32 pa; | ||
471 | int pgsz; | ||
472 | size_t bytes; | ||
473 | struct iotlb_entry e; | ||
474 | |||
475 | pa = sg_phys(sg); | ||
476 | bytes = sg->length; | ||
477 | |||
478 | flags &= ~IOVMF_PGSZ_MASK; | ||
479 | pgsz = bytes_to_iopgsz(bytes); | ||
480 | if (pgsz < 0) | ||
481 | goto err_out; | ||
482 | flags |= pgsz; | ||
483 | |||
484 | pr_debug("%s: [%d] %08x %08x(%x)\n", __func__, | ||
485 | i, da, pa, bytes); | ||
486 | |||
487 | iotlb_init_entry(&e, da, pa, flags); | ||
488 | err = iopgtable_store_entry(obj, &e); | ||
489 | if (err) | ||
490 | goto err_out; | ||
491 | |||
492 | da += bytes; | ||
493 | } | ||
494 | return 0; | ||
495 | |||
496 | err_out: | ||
497 | da = new->da_start; | ||
498 | |||
499 | for_each_sg(sgt->sgl, sg, i, j) { | ||
500 | size_t bytes; | ||
501 | |||
502 | bytes = iopgtable_clear_entry(obj, da); | ||
503 | |||
504 | BUG_ON(!iopgsz_ok(bytes)); | ||
505 | |||
506 | da += bytes; | ||
507 | } | ||
508 | return err; | ||
509 | } | ||
510 | |||
511 | /* release 'da' <-> 'pa' mapping */ | ||
512 | static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area) | ||
513 | { | ||
514 | u32 start; | ||
515 | size_t total = area->da_end - area->da_start; | ||
516 | |||
517 | BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE)); | ||
518 | |||
519 | start = area->da_start; | ||
520 | while (total > 0) { | ||
521 | size_t bytes; | ||
522 | |||
523 | bytes = iopgtable_clear_entry(obj, start); | ||
524 | if (bytes == 0) | ||
525 | bytes = PAGE_SIZE; | ||
526 | else | ||
527 | dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n", | ||
528 | __func__, start, bytes, area->flags); | ||
529 | |||
530 | BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); | ||
531 | |||
532 | total -= bytes; | ||
533 | start += bytes; | ||
534 | } | ||
535 | BUG_ON(total); | ||
536 | } | ||
537 | |||
538 | /* template function for all unmapping */ | ||
539 | static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da, | ||
540 | void (*fn)(const void *), u32 flags) | ||
541 | { | ||
542 | struct sg_table *sgt = NULL; | ||
543 | struct iovm_struct *area; | ||
544 | |||
545 | if (!IS_ALIGNED(da, PAGE_SIZE)) { | ||
546 | dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da); | ||
547 | return NULL; | ||
548 | } | ||
549 | |||
550 | mutex_lock(&obj->mmap_lock); | ||
551 | |||
552 | area = __find_iovm_area(obj, da); | ||
553 | if (!area) { | ||
554 | dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); | ||
555 | goto out; | ||
556 | } | ||
557 | |||
558 | if ((area->flags & flags) != flags) { | ||
559 | dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__, | ||
560 | area->flags); | ||
561 | goto out; | ||
562 | } | ||
563 | sgt = (struct sg_table *)area->sgt; | ||
564 | |||
565 | unmap_iovm_area(obj, area); | ||
566 | |||
567 | fn(area->va); | ||
568 | |||
569 | dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__, | ||
570 | area->da_start, da, area->da_end, | ||
571 | area->da_end - area->da_start, area->flags); | ||
572 | |||
573 | free_iovm_area(obj, area); | ||
574 | out: | ||
575 | mutex_unlock(&obj->mmap_lock); | ||
576 | |||
577 | return sgt; | ||
578 | } | ||
579 | |||
580 | static u32 map_iommu_region(struct iommu *obj, u32 da, | ||
581 | const struct sg_table *sgt, void *va, size_t bytes, u32 flags) | ||
582 | { | ||
583 | int err = -ENOMEM; | ||
584 | struct iovm_struct *new; | ||
585 | |||
586 | mutex_lock(&obj->mmap_lock); | ||
587 | |||
588 | new = alloc_iovm_area(obj, da, bytes, flags); | ||
589 | if (IS_ERR(new)) { | ||
590 | err = PTR_ERR(new); | ||
591 | goto err_alloc_iovma; | ||
592 | } | ||
593 | new->va = va; | ||
594 | new->sgt = sgt; | ||
595 | |||
596 | if (map_iovm_area(obj, new, sgt, new->flags)) | ||
597 | goto err_map; | ||
598 | |||
599 | mutex_unlock(&obj->mmap_lock); | ||
600 | |||
601 | dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n", | ||
602 | __func__, new->da_start, bytes, new->flags, va); | ||
603 | |||
604 | return new->da_start; | ||
605 | |||
606 | err_map: | ||
607 | free_iovm_area(obj, new); | ||
608 | err_alloc_iovma: | ||
609 | mutex_unlock(&obj->mmap_lock); | ||
610 | return err; | ||
611 | } | ||
612 | |||
613 | static inline u32 __iommu_vmap(struct iommu *obj, u32 da, | ||
614 | const struct sg_table *sgt, void *va, size_t bytes, u32 flags) | ||
615 | { | ||
616 | return map_iommu_region(obj, da, sgt, va, bytes, flags); | ||
617 | } | ||
618 | |||
619 | /** | ||
620 | * iommu_vmap - (d)-(p)-(v) address mapper | ||
621 | * @obj: objective iommu | ||
622 | * @sgt: address of scatter gather table | ||
623 | * @flags: iovma and page property | ||
624 | * | ||
625 | * Creates a 1-n-1 mapping with the given @sgt and returns @da. | ||
626 | * All @sgt elements must be io page size aligned. | ||
627 | */ | ||
628 | u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt, | ||
629 | u32 flags) | ||
630 | { | ||
631 | size_t bytes; | ||
632 | void *va = NULL; | ||
633 | |||
634 | if (!obj || !obj->dev || !sgt) | ||
635 | return -EINVAL; | ||
636 | |||
637 | bytes = sgtable_len(sgt); | ||
638 | if (!bytes) | ||
639 | return -EINVAL; | ||
640 | bytes = PAGE_ALIGN(bytes); | ||
641 | |||
642 | if (flags & IOVMF_MMIO) { | ||
643 | va = vmap_sg(sgt); | ||
644 | if (IS_ERR(va)) | ||
645 | return PTR_ERR(va); | ||
646 | } | ||
647 | |||
648 | flags |= IOVMF_DISCONT; | ||
649 | flags |= IOVMF_MMIO; | ||
650 | |||
651 | da = __iommu_vmap(obj, da, sgt, va, bytes, flags); | ||
652 | if (IS_ERR_VALUE(da)) | ||
653 | vunmap_sg(va); | ||
654 | |||
655 | return da; | ||
656 | } | ||
657 | EXPORT_SYMBOL_GPL(iommu_vmap); | ||
658 | |||
659 | /** | ||
660 | * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()' | ||
661 | * @obj: objective iommu | ||
662 | * @da: iommu device virtual address | ||
663 | * | ||
664 | * Free the iommu virtually contiguous memory area starting at | ||
665 | * @da, which was returned by 'iommu_vmap()'. | ||
666 | */ | ||
667 | struct sg_table *iommu_vunmap(struct iommu *obj, u32 da) | ||
668 | { | ||
669 | struct sg_table *sgt; | ||
670 | /* | ||
671 | * 'sgt' was allocated by the caller before 'iommu_vmap()' was called. | ||
672 | * Just return 'sgt' to the caller to free. | ||
673 | */ | ||
674 | sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO); | ||
675 | if (!sgt) | ||
676 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | ||
677 | return sgt; | ||
678 | } | ||
679 | EXPORT_SYMBOL_GPL(iommu_vunmap); | ||
680 | |||
681 | /** | ||
682 | * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper | ||
683 | * @obj: objective iommu | ||
684 | * @da: contiguous iommu virtual memory | ||
685 | * @bytes: allocation size | ||
686 | * @flags: iovma and page property | ||
687 | * | ||
688 | * Allocates @bytes linearly, creates a 1-n-1 mapping and returns | ||
689 | * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set. | ||
690 | */ | ||
691 | u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags) | ||
692 | { | ||
693 | void *va; | ||
694 | struct sg_table *sgt; | ||
695 | |||
696 | if (!obj || !obj->dev || !bytes) | ||
697 | return -EINVAL; | ||
698 | |||
699 | bytes = PAGE_ALIGN(bytes); | ||
700 | |||
701 | va = vmalloc(bytes); | ||
702 | if (!va) | ||
703 | return -ENOMEM; | ||
704 | |||
705 | flags |= IOVMF_DISCONT; | ||
706 | flags |= IOVMF_ALLOC; | ||
707 | |||
708 | sgt = sgtable_alloc(bytes, flags, da, 0); | ||
709 | if (IS_ERR(sgt)) { | ||
710 | da = PTR_ERR(sgt); | ||
711 | goto err_sgt_alloc; | ||
712 | } | ||
713 | sgtable_fill_vmalloc(sgt, va); | ||
714 | |||
715 | da = __iommu_vmap(obj, da, sgt, va, bytes, flags); | ||
716 | if (IS_ERR_VALUE(da)) | ||
717 | goto err_iommu_vmap; | ||
718 | |||
719 | return da; | ||
720 | |||
721 | err_iommu_vmap: | ||
722 | sgtable_drain_vmalloc(sgt); | ||
723 | sgtable_free(sgt); | ||
724 | err_sgt_alloc: | ||
725 | vfree(va); | ||
726 | return da; | ||
727 | } | ||
728 | EXPORT_SYMBOL_GPL(iommu_vmalloc); | ||
729 | |||
730 | /** | ||
731 | * iommu_vfree - release memory allocated by 'iommu_vmalloc()' | ||
732 | * @obj: objective iommu | ||
733 | * @da: iommu device virtual address | ||
734 | * | ||
735 | * Frees the iommu virtually contiguous memory area starting at | ||
736 | * @da, as obtained from 'iommu_vmalloc()'. | ||
737 | */ | ||
738 | void iommu_vfree(struct iommu *obj, const u32 da) | ||
739 | { | ||
740 | struct sg_table *sgt; | ||
741 | |||
742 | sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC); | ||
743 | if (!sgt) | ||
744 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | ||
745 | sgtable_free(sgt); | ||
746 | } | ||
747 | EXPORT_SYMBOL_GPL(iommu_vfree); | ||
748 | |||
749 | static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va, | ||
750 | size_t bytes, u32 flags) | ||
751 | { | ||
752 | struct sg_table *sgt; | ||
753 | |||
754 | sgt = sgtable_alloc(bytes, flags, da, pa); | ||
755 | if (IS_ERR(sgt)) | ||
756 | return PTR_ERR(sgt); | ||
757 | |||
758 | sgtable_fill_kmalloc(sgt, pa, da, bytes); | ||
759 | |||
760 | da = map_iommu_region(obj, da, sgt, va, bytes, flags); | ||
761 | if (IS_ERR_VALUE(da)) { | ||
762 | sgtable_drain_kmalloc(sgt); | ||
763 | sgtable_free(sgt); | ||
764 | } | ||
765 | |||
766 | return da; | ||
767 | } | ||
768 | |||
769 | /** | ||
770 | * iommu_kmap - (d)-(p)-(v) address mapper | ||
771 | * @obj: objective iommu | ||
772 | * @da: contiguous iommu virtual memory | ||
773 | * @pa: contiguous physical memory | ||
774 | * @flags: iovma and page property | ||
775 | * | ||
776 | * Creates 1-1-1 mapping and returns @da again, which can be | ||
777 | * adjusted if 'IOVMF_DA_FIXED' is not set. | ||
778 | */ | ||
779 | u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes, | ||
780 | u32 flags) | ||
781 | { | ||
782 | void *va; | ||
783 | |||
784 | if (!obj || !obj->dev || !bytes) | ||
785 | return -EINVAL; | ||
786 | |||
787 | bytes = PAGE_ALIGN(bytes); | ||
788 | |||
789 | va = ioremap(pa, bytes); | ||
790 | if (!va) | ||
791 | return -ENOMEM; | ||
792 | |||
793 | flags |= IOVMF_LINEAR; | ||
794 | flags |= IOVMF_MMIO; | ||
795 | |||
796 | da = __iommu_kmap(obj, da, pa, va, bytes, flags); | ||
797 | if (IS_ERR_VALUE(da)) | ||
798 | iounmap(va); | ||
799 | |||
800 | return da; | ||
801 | } | ||
802 | EXPORT_SYMBOL_GPL(iommu_kmap); | ||
803 | |||
804 | /** | ||
805 | * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()' | ||
806 | * @obj: objective iommu | ||
807 | * @da: iommu device virtual address | ||
808 | * | ||
809 | * Frees the iommu virtually contiguous memory area starting at | ||
810 | * @da, which was passed to and was returned by 'iommu_kmap()'. | ||
811 | */ | ||
812 | void iommu_kunmap(struct iommu *obj, u32 da) | ||
813 | { | ||
814 | struct sg_table *sgt; | ||
815 | typedef void (*func_t)(const void *); | ||
816 | |||
817 | sgt = unmap_vm_area(obj, da, (func_t)iounmap, | ||
818 | IOVMF_LINEAR | IOVMF_MMIO); | ||
819 | if (!sgt) | ||
820 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | ||
821 | sgtable_free(sgt); | ||
822 | } | ||
823 | EXPORT_SYMBOL_GPL(iommu_kunmap); | ||
824 | |||
825 | /** | ||
826 | * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper | ||
827 | * @obj: objective iommu | ||
828 | * @da: contiguous iommu virtual memory | ||
829 | * @bytes: bytes for allocation | ||
830 | * @flags: iovma and page property | ||
831 | * | ||
832 | * Allocates @bytes linearly, creates a 1-1-1 mapping and returns | ||
833 | * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set. | ||
834 | */ | ||
835 | u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags) | ||
836 | { | ||
837 | void *va; | ||
838 | u32 pa; | ||
839 | |||
840 | if (!obj || !obj->dev || !bytes) | ||
841 | return -EINVAL; | ||
842 | |||
843 | bytes = PAGE_ALIGN(bytes); | ||
844 | |||
845 | va = kmalloc(bytes, GFP_KERNEL | GFP_DMA); | ||
846 | if (!va) | ||
847 | return -ENOMEM; | ||
848 | pa = virt_to_phys(va); | ||
849 | |||
850 | flags |= IOVMF_LINEAR; | ||
851 | flags |= IOVMF_ALLOC; | ||
852 | |||
853 | da = __iommu_kmap(obj, da, pa, va, bytes, flags); | ||
854 | if (IS_ERR_VALUE(da)) | ||
855 | kfree(va); | ||
856 | |||
857 | return da; | ||
858 | } | ||
859 | EXPORT_SYMBOL_GPL(iommu_kmalloc); | ||
860 | |||
861 | /** | ||
862 | * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()' | ||
863 | * @obj: objective iommu | ||
864 | * @da: iommu device virtual address | ||
865 | * | ||
866 | * Frees the iommu virtually contiguous memory area starting at | ||
867 | * @da, which was passed to and was returned by 'iommu_kmalloc()'. | ||
868 | */ | ||
869 | void iommu_kfree(struct iommu *obj, u32 da) | ||
870 | { | ||
871 | struct sg_table *sgt; | ||
872 | |||
873 | sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC); | ||
874 | if (!sgt) | ||
875 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | ||
876 | sgtable_free(sgt); | ||
877 | } | ||
878 | EXPORT_SYMBOL_GPL(iommu_kfree); | ||
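Putting the pieces together, a hedged end-to-end sketch of the kmalloc-backed path; the "isp" alias and the sizes are illustrative:

    struct iommu *obj = iommu_get("isp");
    u32 da;

    if (IS_ERR(obj))
            return PTR_ERR(obj);

    /* physically contiguous buffer, visible to the device at 'da' */
    da = iommu_kmalloc(obj, 0, SZ_64K, 0);
    if (IS_ERR_VALUE(da)) {
            iommu_put(obj);
            return (int)da;
    }
    /* ... hand 'da' to the device ... */
    iommu_kfree(obj, da);
    iommu_put(obj);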
879 | |||
880 | |||
881 | static int __init iovmm_init(void) | ||
882 | { | ||
883 | const unsigned long flags = SLAB_HWCACHE_ALIGN; | ||
884 | struct kmem_cache *p; | ||
885 | |||
886 | p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0, | ||
887 | flags, NULL); | ||
888 | if (!p) | ||
889 | return -ENOMEM; | ||
890 | iovm_area_cachep = p; | ||
891 | |||
892 | return 0; | ||
893 | } | ||
894 | module_init(iovmm_init); | ||
895 | |||
896 | static void __exit iovmm_exit(void) | ||
897 | { | ||
898 | kmem_cache_destroy(iovm_area_cachep); | ||
899 | } | ||
900 | module_exit(iovmm_exit); | ||
901 | |||
902 | MODULE_DESCRIPTION("omap iommu: simple virtual address space management"); | ||
903 | MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); | ||
904 | MODULE_LICENSE("GPL v2"); | ||