author		Sricharan R <sricharan@codeaurora.org>	2016-06-13 07:36:06 -0400
committer	Joerg Roedel <jroedel@suse.de>	2016-06-21 07:57:13 -0400
commit		c9220fbd7741861294dede37465243ee7efdb7bd (patch)
tree		21dbf5978e877fab19378e63d483e33c854f8ad9 /drivers/iommu/msm_iommu.c
parent		f78ebca8ff3d61fb45fef1274595a72d1314d955 (diff)
iommu/msm: use generic ARMV7S short descriptor pagetable ops
This IOMMU uses the ARMv7 short-descriptor page table format, so use the generic ARMV7S io-pgtable ops instead of reimplementing the same page-table handling in the driver.

Signed-off-by: Sricharan R <sricharan@codeaurora.org>
Tested-by: Archit Taneja <architt@codeaurora.org>
Tested-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/msm_iommu.c')
-rw-r--r--	drivers/iommu/msm_iommu.c | 405
1 file changed, 108 insertions(+), 297 deletions(-)
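For reference, a minimal sketch of the io-pgtable pattern the driver switches to: describe the hardware in an io_pgtable_cfg, hand the framework a set of iommu_gather_ops TLB callbacks, and let the generic ARM_V7S (ARMv7 short-descriptor) code own the page tables. This is a simplified, hypothetical driver, not code from the patch; the my_* names, the page-size bitmap and the stub callbacks are illustrative assumptions, and the real configuration lives in msm_iommu_domain_config() in the diff below.

/*
 * Sketch only: a hypothetical driver adopting the generic ARM_V7S
 * io-pgtable ops (kernel v4.7-era API). Not part of this patch.
 */
#include <linux/device.h>
#include <linux/sizes.h>
#include "io-pgtable.h"		/* private drivers/iommu header at this point */

struct my_priv {
	struct io_pgtable_cfg	cfg;	/* hardware description */
	struct io_pgtable_ops	*iop;	/* map/unmap entry points */
	struct device		*dev;
};

/* Stub TLB maintenance callbacks; a real driver issues invalidations here. */
static void my_tlb_flush_all(void *cookie) { }
static void my_tlb_add_flush(unsigned long iova, size_t size, size_t granule,
			     bool leaf, void *cookie) { }
static void my_tlb_sync(void *cookie) { }

static const struct iommu_gather_ops my_tlb_ops = {
	.tlb_flush_all	= my_tlb_flush_all,
	.tlb_add_flush	= my_tlb_add_flush,
	.tlb_sync	= my_tlb_sync,
};

static int my_domain_config(struct my_priv *priv)
{
	priv->cfg = (struct io_pgtable_cfg) {
		.quirks		= IO_PGTABLE_QUIRK_TLBI_ON_MAP,
		.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
		.ias		= 32,		/* 32-bit input (IOVA) space */
		.oas		= 32,		/* 32-bit output (PA) space */
		.tlb		= &my_tlb_ops,
		.iommu_dev	= priv->dev,
	};

	/* ARM_V7S selects the ARMv7 short-descriptor format */
	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
	if (!priv->iop)
		return -EINVAL;

	return 0;
}

Mappings then go through the returned ops, e.g. priv->iop->map(priv->iop, iova, pa, SZ_4K, prot), typically under a driver-private spinlock, which is what the rewritten msm_iommu_map()/msm_iommu_unmap() in the patch do.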
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 8ab064324a68..b09692bb5b0a 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -35,27 +35,27 @@
 
 #include "msm_iommu_hw-8xxx.h"
 #include "msm_iommu.h"
+#include "io-pgtable.h"
 
 #define MRC(reg, processor, op1, crn, crm, op2)			    \
 __asm__ __volatile__ (						    \
 "   mrc   " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
 : "=r" (reg))
 
-#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
-#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)
-
 /* bitmap of the page sizes currently supported */
 #define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
 
-static int msm_iommu_tex_class[4];
-
 DEFINE_SPINLOCK(msm_iommu_lock);
 static LIST_HEAD(qcom_iommu_devices);
+static struct iommu_ops msm_iommu_ops;
 
 struct msm_priv {
-	unsigned long *pgtable;
 	struct list_head list_attached;
 	struct iommu_domain domain;
+	struct io_pgtable_cfg	cfg;
+	struct io_pgtable_ops	*iop;
+	struct device		*dev;
+	spinlock_t		pgtlock; /* pagetable lock */
 };
 
 static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
@@ -122,49 +122,79 @@ static void msm_iommu_reset(void __iomem *base, int ncb)
 		SET_TLBFLPTER(base, ctx, 0);
 		SET_TLBSLPTER(base, ctx, 0);
 		SET_TLBLKCR(base, ctx, 0);
-		SET_PRRR(base, ctx, 0);
-		SET_NMRR(base, ctx, 0);
 		SET_CONTEXTIDR(base, ctx, 0);
 	}
 }
 
-static int __flush_iotlb(struct iommu_domain *domain)
+static void __flush_iotlb(void *cookie)
 {
-	struct msm_priv *priv = to_msm_priv(domain);
+	struct msm_priv *priv = cookie;
 	struct msm_iommu_dev *iommu = NULL;
 	struct msm_iommu_ctx_dev *master;
 	int ret = 0;
 
-#ifndef CONFIG_IOMMU_PGTABLES_L2
-	unsigned long *fl_table = priv->pgtable;
-	int i;
-
-	if (!list_empty(&priv->list_attached)) {
-		dmac_flush_range(fl_table, fl_table + SZ_16K);
-
-		for (i = 0; i < NUM_FL_PTE; i++)
-			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
-				void *sl_table = __va(fl_table[i] &
-						      FL_BASE_MASK);
-				dmac_flush_range(sl_table, sl_table + SZ_4K);
-			}
-	}
-#endif
-
 	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
 		ret = __enable_clocks(iommu);
 		if (ret)
 			goto fail;
 
 		list_for_each_entry(master, &iommu->ctx_list, list)
 			SET_CTX_TLBIALL(iommu->base, master->num, 0);
 
 		__disable_clocks(iommu);
 	}
 fail:
-	return ret;
+	return;
 }
 
+static void __flush_iotlb_range(unsigned long iova, size_t size,
+				size_t granule, bool leaf, void *cookie)
+{
+	struct msm_priv *priv = cookie;
+	struct msm_iommu_dev *iommu = NULL;
+	struct msm_iommu_ctx_dev *master;
+	int ret = 0;
+	int temp_size;
+
+	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
+		ret = __enable_clocks(iommu);
+		if (ret)
+			goto fail;
+
+		list_for_each_entry(master, &iommu->ctx_list, list) {
+			temp_size = size;
+			do {
+				iova &= TLBIVA_VA;
+				iova |= GET_CONTEXTIDR_ASID(iommu->base,
+							    master->num);
+				SET_TLBIVA(iommu->base, master->num, iova);
+				iova += granule;
+			} while (temp_size -= granule);
+		}
+
+		__disable_clocks(iommu);
+	}
+
+fail:
+	return;
+}
+
+static void __flush_iotlb_sync(void *cookie)
+{
+	/*
+	 * Nothing is needed here, the barrier to guarantee
+	 * completion of the tlb sync operation is implicitly
+	 * taken care when the iommu client does a writel before
+	 * kick starting the other master.
+	 */
+}
+
+static const struct iommu_gather_ops msm_iommu_gather_ops = {
+	.tlb_flush_all = __flush_iotlb,
+	.tlb_add_flush = __flush_iotlb_range,
+	.tlb_sync = __flush_iotlb_sync,
+};
+
 static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
 {
 	int idx;
@@ -232,15 +262,17 @@ static void __reset_context(void __iomem *base, int ctx)
 	SET_TLBFLPTER(base, ctx, 0);
 	SET_TLBSLPTER(base, ctx, 0);
 	SET_TLBLKCR(base, ctx, 0);
-	SET_PRRR(base, ctx, 0);
-	SET_NMRR(base, ctx, 0);
 }
 
-static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
+static void __program_context(void __iomem *base, int ctx,
+			      struct msm_priv *priv)
 {
-	unsigned int prrr, nmrr;
 	__reset_context(base, ctx);
 
+	/* Turn on TEX Remap */
+	SET_TRE(base, ctx, 1);
+	SET_AFE(base, ctx, 1);
+
 	/* Set up HTW mode */
 	/* TLB miss configuration: perform HTW on miss */
 	SET_TLBMCFG(base, ctx, 0x3);
@@ -248,8 +280,13 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
 	/* V2P configuration: HTW for access */
 	SET_V2PCFG(base, ctx, 0x3);
 
-	SET_TTBCR(base, ctx, 0);
-	SET_TTBR0_PA(base, ctx, (pgtable >> 14));
+	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
+	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]);
+	SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]);
+
+	/* Set prrr and nmrr */
+	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
+	SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);
 
 	/* Invalidate the TLB for this context */
 	SET_CTX_TLBIALL(base, ctx, 0);
@@ -268,38 +305,9 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
 	SET_RCOSH(base, ctx, 1);
 	SET_RCNSH(base, ctx, 1);
 
-	/* Turn on TEX Remap */
-	SET_TRE(base, ctx, 1);
-
-	/* Set TEX remap attributes */
-	RCP15_PRRR(prrr);
-	RCP15_NMRR(nmrr);
-	SET_PRRR(base, ctx, prrr);
-	SET_NMRR(base, ctx, nmrr);
-
 	/* Turn on BFB prefetch */
 	SET_BFBDFE(base, ctx, 1);
 
-#ifdef CONFIG_IOMMU_PGTABLES_L2
-	/* Configure page tables as inner-cacheable and shareable to reduce
-	 * the TLB miss penalty.
-	 */
-	SET_TTBR0_SH(base, ctx, 1);
-	SET_TTBR1_SH(base, ctx, 1);
-
-	SET_TTBR0_NOS(base, ctx, 1);
-	SET_TTBR1_NOS(base, ctx, 1);
-
-	SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
-	SET_TTBR0_IRGNL(base, ctx, 1);
-
-	SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
-	SET_TTBR1_IRGNL(base, ctx, 1);
-
-	SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
-	SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
-#endif
-
 	/* Enable the MMU */
 	SET_M(base, ctx, 1);
 }
@@ -316,13 +324,6 @@ static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
 		goto fail_nomem;
 
 	INIT_LIST_HEAD(&priv->list_attached);
-	priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
-							  get_order(SZ_16K));
-
-	if (!priv->pgtable)
-		goto fail_nomem;
-
-	memset(priv->pgtable, 0, SZ_16K);
 
 	priv->domain.geometry.aperture_start = 0;
 	priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
@@ -339,24 +340,35 @@ static void msm_iommu_domain_free(struct iommu_domain *domain)
 {
 	struct msm_priv *priv;
 	unsigned long flags;
-	unsigned long *fl_table;
-	int i;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
 	priv = to_msm_priv(domain);
+	kfree(priv);
+	spin_unlock_irqrestore(&msm_iommu_lock, flags);
+}
 
-	fl_table = priv->pgtable;
+static int msm_iommu_domain_config(struct msm_priv *priv)
+{
+	spin_lock_init(&priv->pgtlock);
 
-	for (i = 0; i < NUM_FL_PTE; i++)
-		if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
-			free_page((unsigned long) __va(((fl_table[i]) &
-							FL_BASE_MASK)));
+	priv->cfg = (struct io_pgtable_cfg) {
+		.quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
+		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
+		.ias = 32,
+		.oas = 32,
+		.tlb = &msm_iommu_gather_ops,
+		.iommu_dev = priv->dev,
+	};
 
-	free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
-	priv->pgtable = NULL;
+	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
+	if (!priv->iop) {
+		dev_err(priv->dev, "Failed to allocate pgtable\n");
+		return -EINVAL;
+	}
 
-	kfree(priv);
-	spin_unlock_irqrestore(&msm_iommu_lock, flags);
+	msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;
+
+	return 0;
 }
 
 static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -367,6 +379,9 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	struct msm_priv *priv = to_msm_priv(domain);
 	struct msm_iommu_ctx_dev *master;
 
+	priv->dev = dev;
+	msm_iommu_domain_config(priv);
+
 	spin_lock_irqsave(&msm_iommu_lock, flags);
 	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
 		master = list_first_entry(&iommu->ctx_list,
@@ -392,14 +407,13 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 				}
 				config_mids(iommu, master);
 				__program_context(iommu->base, master->num,
-						  __pa(priv->pgtable));
+						  priv);
 			}
 			__disable_clocks(iommu);
 			list_add(&iommu->dom_node, &priv->list_attached);
 		}
 	}
 
-	ret = __flush_iotlb(domain);
 fail:
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
 
@@ -415,11 +429,9 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain,
 	struct msm_iommu_ctx_dev *master;
 	int ret;
 
-	spin_lock_irqsave(&msm_iommu_lock, flags);
-	ret = __flush_iotlb(domain);
-	if (ret)
-		goto fail;
+	free_io_pgtable_ops(priv->iop);
 
+	spin_lock_irqsave(&msm_iommu_lock, flags);
 	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
 		ret = __enable_clocks(iommu);
 		if (ret)
@@ -435,190 +447,30 @@ fail:
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
 }
 
-static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
+static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
 			 phys_addr_t pa, size_t len, int prot)
 {
-	struct msm_priv *priv;
+	struct msm_priv *priv = to_msm_priv(domain);
 	unsigned long flags;
-	unsigned long *fl_table;
-	unsigned long *fl_pte;
-	unsigned long fl_offset;
-	unsigned long *sl_table;
-	unsigned long *sl_pte;
-	unsigned long sl_offset;
-	unsigned int pgprot;
-	int ret = 0, tex, sh;
-
-	spin_lock_irqsave(&msm_iommu_lock, flags);
-
-	sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
-	tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];
-
-	if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
-		ret = -EINVAL;
-		goto fail;
-	}
-
-	priv = to_msm_priv(domain);
-
-	fl_table = priv->pgtable;
-
-	if (len != SZ_16M && len != SZ_1M &&
-	    len != SZ_64K && len != SZ_4K) {
-		pr_debug("Bad size: %d\n", len);
-		ret = -EINVAL;
-		goto fail;
-	}
-
-	if (!fl_table) {
-		pr_debug("Null page table\n");
-		ret = -EINVAL;
-		goto fail;
-	}
-
-	if (len == SZ_16M || len == SZ_1M) {
-		pgprot = sh ? FL_SHARED : 0;
-		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
-		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
-		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
-	} else {
-		pgprot = sh ? SL_SHARED : 0;
-		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
-		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
-		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
-	}
-
-	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
-	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */
-
-	if (len == SZ_16M) {
-		int i = 0;
-		for (i = 0; i < 16; i++)
-			*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
-				  FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
-				  FL_SHARED | FL_NG | pgprot;
-	}
-
-	if (len == SZ_1M)
-		*fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
-					    FL_TYPE_SECT | FL_SHARED | pgprot;
-
-	/* Need a 2nd level table */
-	if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) {
-		unsigned long *sl;
-		sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
-							get_order(SZ_4K));
-
-		if (!sl) {
-			pr_debug("Could not allocate second level table\n");
-			ret = -ENOMEM;
-			goto fail;
-		}
-
-		memset(sl, 0, SZ_4K);
-		*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);
-	}
-
-	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
-	sl_offset = SL_OFFSET(va);
-	sl_pte = sl_table + sl_offset;
-
-
-	if (len == SZ_4K)
-		*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
-					  SL_SHARED | SL_TYPE_SMALL | pgprot;
-
-	if (len == SZ_64K) {
-		int i;
-
-		for (i = 0; i < 16; i++)
-			*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
-			    SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
-	}
-
-	ret = __flush_iotlb(domain);
-fail:
-	spin_unlock_irqrestore(&msm_iommu_lock, flags);
+	int ret;
+
+	spin_lock_irqsave(&priv->pgtlock, flags);
+	ret = priv->iop->map(priv->iop, iova, pa, len, prot);
+	spin_unlock_irqrestore(&priv->pgtlock, flags);
+
 	return ret;
 }
 
-static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 			      size_t len)
 {
-	struct msm_priv *priv;
+	struct msm_priv *priv = to_msm_priv(domain);
 	unsigned long flags;
-	unsigned long *fl_table;
-	unsigned long *fl_pte;
-	unsigned long fl_offset;
-	unsigned long *sl_table;
-	unsigned long *sl_pte;
-	unsigned long sl_offset;
-	int i, ret = 0;
-
-	spin_lock_irqsave(&msm_iommu_lock, flags);
-
-	priv = to_msm_priv(domain);
-
-	fl_table = priv->pgtable;
-
-	if (len != SZ_16M && len != SZ_1M &&
-	    len != SZ_64K && len != SZ_4K) {
-		pr_debug("Bad length: %d\n", len);
-		goto fail;
-	}
-
-	if (!fl_table) {
-		pr_debug("Null page table\n");
-		goto fail;
-	}
-
-	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
-	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */
-
-	if (*fl_pte == 0) {
-		pr_debug("First level PTE is 0\n");
-		goto fail;
-	}
-
-	/* Unmap supersection */
-	if (len == SZ_16M)
-		for (i = 0; i < 16; i++)
-			*(fl_pte+i) = 0;
-
-	if (len == SZ_1M)
-		*fl_pte = 0;
-
-	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
-	sl_offset = SL_OFFSET(va);
-	sl_pte = sl_table + sl_offset;
-
-	if (len == SZ_64K) {
-		for (i = 0; i < 16; i++)
-			*(sl_pte+i) = 0;
-	}
-
-	if (len == SZ_4K)
-		*sl_pte = 0;
-
-	if (len == SZ_4K || len == SZ_64K) {
-		int used = 0;
-
-		for (i = 0; i < NUM_SL_PTE; i++)
-			if (sl_table[i])
-				used = 1;
-		if (!used) {
-			free_page((unsigned long)sl_table);
-			*fl_pte = 0;
-		}
-	}
-
-	ret = __flush_iotlb(domain);
-
-fail:
-	spin_unlock_irqrestore(&msm_iommu_lock, flags);
-
-	/* the IOMMU API requires us to return how many bytes were unmapped */
-	len = ret ? 0 : len;
+
+	spin_lock_irqsave(&priv->pgtlock, flags);
+	len = priv->iop->unmap(priv->iop, iova, len);
+	spin_unlock_irqrestore(&priv->pgtlock, flags);
+
 	return len;
 }
 
@@ -699,8 +551,6 @@ static void print_ctx_regs(void __iomem *base, int ctx)
 	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
 	pr_err("SCTLR = %08x ACTLR = %08x\n",
 	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
-	pr_err("PRRR = %08x NMRR = %08x\n",
-	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
 }
 
 static void insert_iommu_master(struct device *dev,
@@ -941,47 +791,8 @@ static void __exit msm_iommu_driver_exit(void)
 subsys_initcall(msm_iommu_driver_init);
 module_exit(msm_iommu_driver_exit);
 
-static int __init get_tex_class(int icp, int ocp, int mt, int nos)
-{
-	int i = 0;
-	unsigned int prrr = 0;
-	unsigned int nmrr = 0;
-	int c_icp, c_ocp, c_mt, c_nos;
-
-	RCP15_PRRR(prrr);
-	RCP15_NMRR(nmrr);
-
-	for (i = 0; i < NUM_TEX_CLASS; i++) {
-		c_nos = PRRR_NOS(prrr, i);
-		c_mt = PRRR_MT(prrr, i);
-		c_icp = NMRR_ICP(nmrr, i);
-		c_ocp = NMRR_OCP(nmrr, i);
-
-		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
-			return i;
-	}
-
-	return -ENODEV;
-}
-
-static void __init setup_iommu_tex_classes(void)
-{
-	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
-			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);
-
-	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
-			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);
-
-	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
-			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);
-
-	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
-			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
-}
-
 static int __init msm_iommu_init(void)
 {
-	setup_iommu_tex_classes();
 	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
 	return 0;
 }