Diffstat (limited to 'drivers/iommu/omap-iommu.c')
-rw-r--r--  drivers/iommu/omap-iommu.c  312
1 file changed, 196 insertions, 116 deletions
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 18003c044454..bbb7dcef02d3 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -76,53 +76,23 @@ struct iotlb_lock {
 	short vict;
 };
 
-/* accommodate the difference between omap1 and omap2/3 */
-static const struct iommu_functions *arch_iommu;
-
 static struct platform_driver omap_iommu_driver;
 static struct kmem_cache *iopte_cachep;
 
 /**
- * omap_install_iommu_arch - Install archtecure specific iommu functions
- * @ops: a pointer to architecture specific iommu functions
- *
- * There are several kind of iommu algorithm(tlb, pagetable) among
- * omap series. This interface installs such an iommu algorighm.
- **/
-int omap_install_iommu_arch(const struct iommu_functions *ops)
-{
-	if (arch_iommu)
-		return -EBUSY;
-
-	arch_iommu = ops;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(omap_install_iommu_arch);
-
-/**
- * omap_uninstall_iommu_arch - Uninstall archtecure specific iommu functions
- * @ops: a pointer to architecture specific iommu functions
- *
- * This interface uninstalls the iommu algorighm installed previously.
- **/
-void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
-{
-	if (arch_iommu != ops)
-		pr_err("%s: not your arch\n", __func__);
-
-	arch_iommu = NULL;
-}
-EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
-
-/**
  * omap_iommu_save_ctx - Save registers for pm off-mode support
  * @dev: client device
  **/
 void omap_iommu_save_ctx(struct device *dev)
 {
 	struct omap_iommu *obj = dev_to_omap_iommu(dev);
+	u32 *p = obj->ctx;
+	int i;
 
-	arch_iommu->save_ctx(obj);
+	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
+		p[i] = iommu_read_reg(obj, i * sizeof(u32));
+		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
+	}
 }
 EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
 
@@ -133,28 +103,74 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
 void omap_iommu_restore_ctx(struct device *dev)
 {
 	struct omap_iommu *obj = dev_to_omap_iommu(dev);
+	u32 *p = obj->ctx;
+	int i;
 
-	arch_iommu->restore_ctx(obj);
+	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
+		iommu_write_reg(obj, p[i], i * sizeof(u32));
+		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
+	}
 }
 EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
 
-/**
- * omap_iommu_arch_version - Return running iommu arch version
- **/
-u32 omap_iommu_arch_version(void)
+static void __iommu_set_twl(struct omap_iommu *obj, bool on)
 {
-	return arch_iommu->version;
+	u32 l = iommu_read_reg(obj, MMU_CNTL);
+
+	if (on)
+		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
+	else
+		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
+
+	l &= ~MMU_CNTL_MASK;
+	if (on)
+		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
+	else
+		l |= (MMU_CNTL_MMU_EN);
+
+	iommu_write_reg(obj, l, MMU_CNTL);
+}
+
+static int omap2_iommu_enable(struct omap_iommu *obj)
+{
+	u32 l, pa;
+
+	if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
+		return -EINVAL;
+
+	pa = virt_to_phys(obj->iopgd);
+	if (!IS_ALIGNED(pa, SZ_16K))
+		return -EINVAL;
+
+	l = iommu_read_reg(obj, MMU_REVISION);
+	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
+		 (l >> 4) & 0xf, l & 0xf);
+
+	iommu_write_reg(obj, pa, MMU_TTB);
+
+	if (obj->has_bus_err_back)
+		iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
+
+	__iommu_set_twl(obj, true);
+
+	return 0;
+}
+
+static void omap2_iommu_disable(struct omap_iommu *obj)
+{
+	u32 l = iommu_read_reg(obj, MMU_CNTL);
+
+	l &= ~MMU_CNTL_MASK;
+	iommu_write_reg(obj, l, MMU_CNTL);
+
+	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
 }
-EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
 
 static int iommu_enable(struct omap_iommu *obj)
 {
 	int err;
 	struct platform_device *pdev = to_platform_device(obj->dev);
-	struct iommu_platform_data *pdata = pdev->dev.platform_data;
-
-	if (!arch_iommu)
-		return -ENODEV;
+	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
 
 	if (pdata && pdata->deassert_reset) {
 		err = pdata->deassert_reset(pdev, pdata->reset_name);
@@ -166,7 +182,7 @@ static int iommu_enable(struct omap_iommu *obj)
 
 	pm_runtime_get_sync(obj->dev);
 
-	err = arch_iommu->enable(obj);
+	err = omap2_iommu_enable(obj);
 
 	return err;
 }
@@ -174,9 +190,9 @@ static int iommu_enable(struct omap_iommu *obj)
 static void iommu_disable(struct omap_iommu *obj)
 {
 	struct platform_device *pdev = to_platform_device(obj->dev);
-	struct iommu_platform_data *pdata = pdev->dev.platform_data;
+	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
 
-	arch_iommu->disable(obj);
+	omap2_iommu_disable(obj);
 
 	pm_runtime_put_sync(obj->dev);
 
@@ -187,44 +203,51 @@ static void iommu_disable(struct omap_iommu *obj)
 /*
  * TLB operations
  */
-void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
-{
-	BUG_ON(!cr || !e);
-
-	arch_iommu->cr_to_e(cr, e);
-}
-EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);
-
 static inline int iotlb_cr_valid(struct cr_regs *cr)
 {
 	if (!cr)
 		return -EINVAL;
 
-	return arch_iommu->cr_valid(cr);
-}
-
-static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
-					     struct iotlb_entry *e)
-{
-	if (!e)
-		return NULL;
-
-	return arch_iommu->alloc_cr(obj, e);
+	return cr->cam & MMU_CAM_V;
 }
 
 static u32 iotlb_cr_to_virt(struct cr_regs *cr)
 {
-	return arch_iommu->cr_to_virt(cr);
+	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
+	u32 mask = get_cam_va_mask(cr->cam & page_size);
+
+	return cr->cam & mask;
 }
 
 static u32 get_iopte_attr(struct iotlb_entry *e)
 {
-	return arch_iommu->get_pte_attr(e);
+	u32 attr;
+
+	attr = e->mixed << 5;
+	attr |= e->endian;
+	attr |= e->elsz >> 3;
+	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
+			(e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
+	return attr;
 }
 
 static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
 {
-	return arch_iommu->fault_isr(obj, da);
+	u32 status, fault_addr;
+
+	status = iommu_read_reg(obj, MMU_IRQSTATUS);
+	status &= MMU_IRQ_MASK;
+	if (!status) {
+		*da = 0;
+		return 0;
+	}
+
+	fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
+	*da = fault_addr;
+
+	iommu_write_reg(obj, status, MMU_IRQSTATUS);
+
+	return status;
 }
 
 static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
@@ -250,31 +273,19 @@ static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
 
 static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
 {
-	arch_iommu->tlb_read_cr(obj, cr);
+	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
+	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
 }
 
 static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
 {
-	arch_iommu->tlb_load_cr(obj, cr);
+	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
+	iommu_write_reg(obj, cr->ram, MMU_RAM);
 
 	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
 	iommu_write_reg(obj, 1, MMU_LD_TLB);
 }
 
-/**
- * iotlb_dump_cr - Dump an iommu tlb entry into buf
- * @obj: target iommu
- * @cr: contents of cam and ram register
- * @buf: output buffer
- **/
-static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
-				    char *buf)
-{
-	BUG_ON(!cr || !buf);
-
-	return arch_iommu->dump_cr(obj, cr, buf);
-}
-
 /* only used in iotlb iteration for-loop */
 static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
 {
@@ -289,12 +300,36 @@ static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
 	return cr;
 }
 
+#ifdef PREFETCH_IOTLB
+static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
+				      struct iotlb_entry *e)
+{
+	struct cr_regs *cr;
+
+	if (!e)
+		return NULL;
+
+	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
+		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
+			e->da);
+		return ERR_PTR(-EINVAL);
+	}
+
+	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
+	if (!cr)
+		return ERR_PTR(-ENOMEM);
+
+	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
+	cr->ram = e->pa | e->endian | e->elsz | e->mixed;
+
+	return cr;
+}
+
 /**
  * load_iotlb_entry - Set an iommu tlb entry
  * @obj: target iommu
  * @e: an iommu tlb entry info
  **/
-#ifdef PREFETCH_IOTLB
 static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
 {
 	int err = 0;
@@ -423,7 +458,45 @@ static void flush_iotlb_all(struct omap_iommu *obj)
 	pm_runtime_put_sync(obj->dev);
 }
 
-#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
+#ifdef CONFIG_OMAP_IOMMU_DEBUG
+
+#define pr_reg(name)							\
+	do {								\
+		ssize_t bytes;						\
+		const char *str = "%20s: %08x\n";			\
+		const int maxcol = 32;					\
+		bytes = snprintf(p, maxcol, str, __stringify(name),	\
+				 iommu_read_reg(obj, MMU_##name));	\
+		p += bytes;						\
+		len -= bytes;						\
+		if (len < maxcol)					\
+			goto out;					\
+	} while (0)
+
+static ssize_t
+omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
+{
+	char *p = buf;
+
+	pr_reg(REVISION);
+	pr_reg(IRQSTATUS);
+	pr_reg(IRQENABLE);
+	pr_reg(WALKING_ST);
+	pr_reg(CNTL);
+	pr_reg(FAULT_AD);
+	pr_reg(TTB);
+	pr_reg(LOCK);
+	pr_reg(LD_TLB);
+	pr_reg(CAM);
+	pr_reg(RAM);
+	pr_reg(GFLUSH);
+	pr_reg(FLUSH_ENTRY);
+	pr_reg(READ_CAM);
+	pr_reg(READ_RAM);
+	pr_reg(EMU_FAULT_AD);
+out:
+	return p - buf;
+}
 
 ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
 {
@@ -432,13 +505,12 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
 
 	pm_runtime_get_sync(obj->dev);
 
-	bytes = arch_iommu->dump_ctx(obj, buf, bytes);
+	bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
 
 	pm_runtime_put_sync(obj->dev);
 
 	return bytes;
 }
-EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);
 
 static int
 __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
@@ -464,6 +536,24 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
 }
 
 /**
+ * iotlb_dump_cr - Dump an iommu tlb entry into buf
+ * @obj: target iommu
+ * @cr: contents of cam and ram register
+ * @buf: output buffer
+ **/
+static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
+			     char *buf)
+{
+	char *p = buf;
+
+	/* FIXME: Need more detail analysis of cam/ram */
+	p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
+		     (cr->cam & MMU_CAM_P) ? 1 : 0);
+
+	return p - buf;
+}
+
+/**
  * omap_dump_tlb_entries - dump cr arrays to given buffer
  * @obj: target iommu
  * @buf: output buffer
@@ -488,16 +578,8 @@ size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
 
 	return p - buf;
 }
-EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);
-
-int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
-{
-	return driver_for_each_device(&omap_iommu_driver.driver,
-				      NULL, data, fn);
-}
-EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);
 
-#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
+#endif /* CONFIG_OMAP_IOMMU_DEBUG */
 
 /*
  * H/W pagetable operations
@@ -680,7 +762,8 @@ iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
  * @obj: target iommu
  * @e: an iommu tlb entry info
  **/
-int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
+static int
+omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
 {
 	int err;
 
@@ -690,7 +773,6 @@ int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
 	prefetch_iotlb_entry(obj, e);
 	return err;
 }
-EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
 
 /**
  * iopgtable_lookup_entry - Lookup an iommu pte entry
@@ -819,8 +901,9 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
 	u32 *iopgd, *iopte;
 	struct omap_iommu *obj = data;
 	struct iommu_domain *domain = obj->domain;
+	struct omap_iommu_domain *omap_domain = domain->priv;
 
-	if (!obj->refcount)
+	if (!omap_domain->iommu_dev)
 		return IRQ_NONE;
 
 	errs = iommu_report_fault(obj, &da);
@@ -880,13 +963,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
 
 	spin_lock(&obj->iommu_lock);
 
-	/* an iommu device can only be attached once */
-	if (++obj->refcount > 1) {
-		dev_err(dev, "%s: already attached!\n", obj->name);
-		err = -EBUSY;
-		goto err_enable;
-	}
-
 	obj->iopgd = iopgd;
 	err = iommu_enable(obj);
 	if (err)
@@ -899,7 +975,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
 	return obj;
 
 err_enable:
-	obj->refcount--;
 	spin_unlock(&obj->iommu_lock);
 	return ERR_PTR(err);
 }
@@ -915,9 +990,7 @@ static void omap_iommu_detach(struct omap_iommu *obj)
 
 	spin_lock(&obj->iommu_lock);
 
-	if (--obj->refcount == 0)
-		iommu_disable(obj);
-
+	iommu_disable(obj);
 	obj->iopgd = NULL;
 
 	spin_unlock(&obj->iommu_lock);
@@ -934,7 +1007,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
 	int irq;
 	struct omap_iommu *obj;
 	struct resource *res;
-	struct iommu_platform_data *pdata = pdev->dev.platform_data;
+	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct device_node *of = pdev->dev.of_node;
 
 	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
@@ -981,6 +1054,8 @@ static int omap_iommu_probe(struct platform_device *pdev)
 	pm_runtime_irq_safe(obj->dev);
 	pm_runtime_enable(obj->dev);
 
+	omap_iommu_debugfs_add(obj);
+
 	dev_info(&pdev->dev, "%s registered\n", obj->name);
 	return 0;
 }
@@ -990,6 +1065,7 @@ static int omap_iommu_remove(struct platform_device *pdev)
 	struct omap_iommu *obj = platform_get_drvdata(pdev);
 
 	iopgtable_clear_entry_all(obj);
+	omap_iommu_debugfs_remove(obj);
 
 	pm_runtime_disable(obj->dev);
 
@@ -1026,7 +1102,6 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
 	e->da = da;
 	e->pa = pa;
 	e->valid = MMU_CAM_V;
-	/* FIXME: add OMAP1 support */
 	e->pgsz = pgsz;
 	e->endian = MMU_RAM_ENDIAN_LITTLE;
 	e->elsz = MMU_RAM_ELSZ_8;
@@ -1131,6 +1206,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
 
 	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
 	omap_domain->dev = NULL;
+	oiommu->domain = NULL;
 }
 
 static void omap_iommu_detach_dev(struct iommu_domain *domain,
@@ -1309,6 +1385,8 @@ static int __init omap_iommu_init(void)
 
 	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
 
+	omap_iommu_debugfs_init();
+
 	return platform_driver_register(&omap_iommu_driver);
 }
 /* must be ready before omap3isp is probed */
@@ -1319,6 +1397,8 @@ static void __exit omap_iommu_exit(void)
 	kmem_cache_destroy(iopte_cachep);
 
 	platform_driver_unregister(&omap_iommu_driver);
+
+	omap_iommu_debugfs_exit();
 }
 module_exit(omap_iommu_exit);
 
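
Note that omap_iommu_save_ctx() and omap_iommu_restore_ctx() keep their EXPORT_SYMBOL_GPL while the rest of the arch_iommu hooks lose theirs: client drivers still call this pair around off-mode transitions. A minimal sketch of such a caller is below; the callback names and the driver they would live in are hypothetical, only the two exported helpers and the <linux/omap-iommu.h> header are taken from the tree.

/*
 * Hypothetical client sketch (not part of this patch): a driver whose
 * device sits behind an OMAP IOMMU saves the MMU register bank before
 * its power domain goes off and reprograms it on the way back up.
 */
#include <linux/device.h>
#include <linux/omap-iommu.h>

static int example_mmu_user_suspend(struct device *dev)
{
	/* copy the MMU registers into obj->ctx (see omap_iommu_save_ctx above) */
	omap_iommu_save_ctx(dev);
	return 0;
}

static int example_mmu_user_resume(struct device *dev)
{
	/* write the saved words back into the MMU register file */
	omap_iommu_restore_ctx(dev);
	return 0;
}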
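The pr_reg() macro moved into omap2_iommu_dump_ctx() bounds each snprintf() to one output column and bails out once the buffer cannot hold another full line. A standalone userspace sketch of that accumulate-and-stop pattern follows, with made-up register names and values, for illustration only (it stops on truncation rather than on a remaining-length check, but the shape is the same).

#include <stdio.h>

static size_t dump_regs(char *buf, size_t len)
{
	/* made-up register names/values standing in for the MMU_* reads */
	static const struct { const char *name; unsigned int val; } regs[] = {
		{ "REVISION",  0x00000021 },
		{ "IRQSTATUS", 0x00000000 },
		{ "CNTL",      0x00000006 },
	};
	char *p = buf;
	size_t i;

	for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
		int bytes = snprintf(p, len, "%20s: %08x\n",
				     regs[i].name, regs[i].val);
		if (bytes < 0 || (size_t)bytes >= len)
			break;	/* buffer exhausted: stop, like "goto out" */
		p += bytes;
		len -= bytes;
	}
	return p - buf;
}

int main(void)
{
	char buf[256];
	size_t n = dump_regs(buf, sizeof(buf));

	fwrite(buf, 1, n, stdout);
	return 0;
}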