Diffstat (limited to 'arch/arm/mach-msm/iommu.c')
 arch/arm/mach-msm/iommu.c | 146 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 115 insertions(+), 31 deletions(-)
diff --git a/arch/arm/mach-msm/iommu.c b/arch/arm/mach-msm/iommu.c
index f71747db3bee..e2d58e4cb0d7 100644
--- a/arch/arm/mach-msm/iommu.c
+++ b/arch/arm/mach-msm/iommu.c
@@ -33,6 +33,16 @@
 #include <mach/iommu_hw-8xxx.h>
 #include <mach/iommu.h>
 
+#define MRC(reg, processor, op1, crn, crm, op2) \
+__asm__ __volatile__ ( \
+"	mrc	" #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
+: "=r" (reg))
+
+#define RCP15_PRRR(reg)	MRC(reg, p15, 0, c10, c2, 0)
+#define RCP15_NMRR(reg)	MRC(reg, p15, 0, c10, c2, 1)
+
+static int msm_iommu_tex_class[4];
+
 DEFINE_SPINLOCK(msm_iommu_lock);
 
 struct msm_priv {
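
The MRC() macro added above stringizes its arguments into a single CP15 coprocessor read. As an illustrative sketch (not part of the patch; the helper name is invented), RCP15_PRRR() and RCP15_NMRR() expand to reads of the ARMv7 Primary Region Remap Register and Normal Memory Remap Register, both at c10/c2 with opcode2 0 and 1 respectively:

static inline void read_remap_regs(unsigned int *prrr, unsigned int *nmrr)
{
	/* Hand-expanded equivalents of RCP15_PRRR() / RCP15_NMRR() */
	__asm__ __volatile__ ("mrc p15, 0, %0, c10, c2, 0" : "=r" (*prrr));
	__asm__ __volatile__ ("mrc p15, 0, %0, c10, c2, 1" : "=r" (*nmrr));
}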
@@ -40,23 +50,26 @@ struct msm_priv {
 	struct list_head list_attached;
 };
 
-static void __flush_iotlb(struct iommu_domain *domain)
+static int __flush_iotlb(struct iommu_domain *domain)
 {
 	struct msm_priv *priv = domain->priv;
 	struct msm_iommu_drvdata *iommu_drvdata;
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
-
+	int ret = 0;
 #ifndef CONFIG_IOMMU_PGTABLES_L2
 	unsigned long *fl_table = priv->pgtable;
 	int i;
 
-	dmac_flush_range(fl_table, fl_table + SZ_16K);
+	if (!list_empty(&priv->list_attached)) {
+		dmac_flush_range(fl_table, fl_table + SZ_16K);
 
-	for (i = 0; i < NUM_FL_PTE; i++)
-		if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
-			void *sl_table = __va(fl_table[i] & FL_BASE_MASK);
-			dmac_flush_range(sl_table, sl_table + SZ_4K);
-		}
+		for (i = 0; i < NUM_FL_PTE; i++)
+			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
+				void *sl_table = __va(fl_table[i] &
+							FL_BASE_MASK);
+				dmac_flush_range(sl_table, sl_table + SZ_4K);
+			}
+	}
 #endif
 
 	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
@@ -66,6 +79,8 @@ static void __flush_iotlb(struct iommu_domain *domain)
 		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
 		SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
 	}
+
+	return ret;
 }
 
 static void __reset_context(void __iomem *base, int ctx)
@@ -95,6 +110,7 @@ static void __reset_context(void __iomem *base, int ctx)
 
 static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
 {
+	unsigned int prrr, nmrr;
 	__reset_context(base, ctx);
 
 	/* Set up HTW mode */
@@ -127,11 +143,11 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
 	/* Turn on TEX Remap */
 	SET_TRE(base, ctx, 1);
 
-	/* Do not configure PRRR / NMRR on the IOMMU for now. We will assume
-	 * TEX class 0 for everything until attributes are properly worked out
-	 */
-	SET_PRRR(base, ctx, 0);
-	SET_NMRR(base, ctx, 0);
+	/* Set TEX remap attributes */
+	RCP15_PRRR(prrr);
+	RCP15_NMRR(nmrr);
+	SET_PRRR(base, ctx, prrr);
+	SET_NMRR(base, ctx, nmrr);
 
 	/* Turn on BFB prefetch */
 	SET_BFBDFE(base, ctx, 1);
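
This hunk replaces the placeholder zero PRRR/NMRR values with the CPU's own. With TEX remapping enabled (SET_TRE above), the PTE bits {TEX[0], C, B} no longer encode memory attributes directly; they form a 3-bit index into PRRR/NMRR. Mirroring the CPU's registers into the IOMMU context therefore makes a given index, a "TEX class", mean the same memory type on both sides of a shared page table. A hypothetical helper (not in the patch) showing the index construction:

static inline int tex_remap_index(int tex0, int c, int b)
{
	/* {TEX[0], C, B} select one of eight PRRR/NMRR remap entries */
	return (tex0 << 2) | (c << 1) | b;	/* TEX class 0..7 */
}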
@@ -238,6 +254,11 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 		goto fail;
 	}
 
+	if (!list_empty(&ctx_drvdata->attached_elm)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
 	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
 		if (tmp_drvdata == ctx_drvdata) {
 			ret = -EBUSY;
@@ -248,7 +269,7 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 			  __pa(priv->pgtable));
 
 	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
-	__flush_iotlb(domain);
+	ret = __flush_iotlb(domain);
 
 fail:
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
@@ -263,6 +284,7 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain,
 	struct msm_iommu_drvdata *iommu_drvdata;
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	unsigned long flags;
+	int ret;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
 	priv = domain->priv;
@@ -277,7 +299,10 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain,
 	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
 		goto fail;
 
-	__flush_iotlb(domain);
+	ret = __flush_iotlb(domain);
+	if (ret)
+		goto fail;
+
 	__reset_context(iommu_drvdata->base, ctx_dev->num);
 	list_del_init(&ctx_drvdata->attached_elm);
 
@@ -296,12 +321,21 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
 	unsigned long *sl_table;
 	unsigned long *sl_pte;
 	unsigned long sl_offset;
+	unsigned int pgprot;
 	size_t len = 0x1000UL << order;
-	int ret = 0;
+	int ret = 0, tex, sh;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
-	priv = domain->priv;
 
+	sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
+	tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];
+
+	if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	priv = domain->priv;
 	if (!priv) {
 		ret = -EINVAL;
 		goto fail;
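
msm_iommu_map() now decodes caller-supplied attributes before touching the page tables: MSM_IOMMU_ATTR_SH requests a shared mapping, and the low bits (MSM_IOMMU_CP_MASK) select one of the cache-policy classes resolved at init time; a class the CPU cannot express fails with -EINVAL. A hypothetical caller, assuming the iommu_map() prototype of this kernel generation (domain, iova, pa, gfp order, prot):

/* Sketch only: the domain and addresses are whatever the caller owns. */
static int map_one_page_cached(struct iommu_domain *domain,
			       unsigned long iova, phys_addr_t pa)
{
	int prot = MSM_IOMMU_ATTR_SH | MSM_IOMMU_ATTR_CACHED_WB_WA;

	return iommu_map(domain, iova, pa, 0 /* order 0: one 4K page */, prot);
}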
@@ -322,6 +356,18 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
 		goto fail;
 	}
 
+	if (len == SZ_16M || len == SZ_1M) {
+		pgprot = sh ? FL_SHARED : 0;
+		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
+		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
+		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
+	} else {
+		pgprot = sh ? SL_SHARED : 0;
+		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
+		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
+		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
+	}
+
 	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
 	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */
 
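
The pgprot composition above maps the three low bits of the resolved TEX class onto the PTE attribute bits B, C and TEX[0] (FL_* names for first-level sections, SL_* for second-level pages). Restated as a stand-alone sketch of the first-level flavor, with a worked example: a class index of 5 (binary 101) yields FL_TEX0 | FL_BUFFERABLE, plus FL_SHARED when requested:

static unsigned long fl_pgprot_of(int tex, int sh)
{
	unsigned long pgprot = sh ? FL_SHARED : 0;

	pgprot |= (tex & 0x01) ? FL_BUFFERABLE : 0;	/* B      <- tex bit 0 */
	pgprot |= (tex & 0x02) ? FL_CACHEABLE  : 0;	/* C      <- tex bit 1 */
	pgprot |= (tex & 0x04) ? FL_TEX0       : 0;	/* TEX[0] <- tex bit 2 */
	return pgprot;
}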
@@ -330,17 +376,17 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
 		for (i = 0; i < 16; i++)
 			*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
 				  FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
-				  FL_SHARED;
+				  FL_SHARED | pgprot;
 	}
 
 	if (len == SZ_1M)
 		*fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE |
-			    FL_TYPE_SECT | FL_SHARED;
+			    FL_TYPE_SECT | FL_SHARED | pgprot;
 
 	/* Need a 2nd level table */
 	if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) {
 		unsigned long *sl;
-		sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
+		sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
 							get_order(SZ_4K));
 
 		if (!sl) {
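
The GFP_KERNEL to GFP_ATOMIC switch in this hunk is a correctness fix rather than a tuning change: the second-level table is allocated while msm_iommu_lock is held with interrupts disabled, a context in which a sleeping allocation is not allowed. A minimal sketch of the constraint (the lock is the file's; the helper is illustrative):

static unsigned long *alloc_sl_table(void)
{
	unsigned long flags;
	unsigned long *sl;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	/* Must not sleep here: GFP_KERNEL may, GFP_ATOMIC will not. */
	sl = (unsigned long *) __get_free_pages(GFP_ATOMIC, get_order(SZ_4K));
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return sl;
}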
@@ -360,17 +406,17 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
 
 	if (len == SZ_4K)
 		*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 |
-			    SL_SHARED | SL_TYPE_SMALL;
+			    SL_SHARED | SL_TYPE_SMALL | pgprot;
 
 	if (len == SZ_64K) {
 		int i;
 
 		for (i = 0; i < 16; i++)
 			*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
-			    SL_AP1 | SL_SHARED | SL_TYPE_LARGE;
+			    SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
 	}
 
-	__flush_iotlb(domain);
+	ret = __flush_iotlb(domain);
 fail:
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
 	return ret;
@@ -455,7 +501,7 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 		}
 	}
 
-	__flush_iotlb(domain);
+	ret = __flush_iotlb(domain);
 fail:
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
 	return ret;
@@ -490,9 +536,6 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
 	SET_CTX_TLBIALL(base, ctx, 0);
 	SET_V2PPR_VA(base, ctx, va >> V2Pxx_VA_SHIFT);
 
-	if (GET_FAULT(base, ctx))
-		goto fail;
-
 	par = GET_PAR(base, ctx);
 
 	/* We are dealing with a supersection */
@@ -501,6 +544,9 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
 	else	/* Upper 20 bits from PAR, lower 12 from VA */
 		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
 
+	if (GET_FAULT(base, ctx))
+		ret = 0;
+
 fail:
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
 	return ret;
@@ -543,8 +589,8 @@ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
 {
 	struct msm_iommu_drvdata *drvdata = dev_id;
 	void __iomem *base;
-	unsigned int fsr = 0;
-	int ncb = 0, i = 0;
+	unsigned int fsr;
+	int ncb, i;
 
 	spin_lock(&msm_iommu_lock);
 
@@ -555,7 +601,6 @@ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
 
 	base = drvdata->base;
 
-	pr_err("===== WOAH! =====\n");
 	pr_err("Unexpected IOMMU page fault!\n");
 	pr_err("base = %08x\n", (unsigned int) base);
 
@@ -585,8 +630,47 @@ static struct iommu_ops msm_iommu_ops = {
 	.domain_has_cap = msm_iommu_domain_has_cap
 };
 
-static int msm_iommu_init(void)
+static int __init get_tex_class(int icp, int ocp, int mt, int nos)
+{
+	int i = 0;
+	unsigned int prrr = 0;
+	unsigned int nmrr = 0;
+	int c_icp, c_ocp, c_mt, c_nos;
+
+	RCP15_PRRR(prrr);
+	RCP15_NMRR(nmrr);
+
+	for (i = 0; i < NUM_TEX_CLASS; i++) {
+		c_nos = PRRR_NOS(prrr, i);
+		c_mt = PRRR_MT(prrr, i);
+		c_icp = NMRR_ICP(nmrr, i);
+		c_ocp = NMRR_OCP(nmrr, i);
+
+		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
+			return i;
+	}
+
+	return -ENODEV;
+}
+
+static void __init setup_iommu_tex_classes(void)
+{
+	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
+			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);
+
+	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
+			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);
+
+	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
+			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);
+
+	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
+			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
+}
+
+static int __init msm_iommu_init(void)
 {
+	setup_iommu_tex_classes();
 	register_iommu(&msm_iommu_ops);
 	return 0;
 }