author    Cho KyongHo <pullip.cho@samsung.com>    2014-05-12 02:14:46 -0400
committer Joerg Roedel <jroedel@suse.de>          2014-05-13 13:12:51 -0400
commit    7222e8db2d506197ee183de0f9b76b3ad97e8c18 (patch)
tree      cde77e7536114477e72df9d7a4437f9c1160eae0 /drivers/iommu
parent    d6d211db37e75de2ddc3a4f979038c40df7cc79c (diff)
iommu/exynos: Fix build errors
Commit 25e9d28d92 ("ARM: EXYNOS: remove system mmu initialization from exynos tree") removed the arch/arm/mach-exynos/mach/sysmmu.h header without removing the remaining use of it in the exynos-iommu driver, causing a compilation error. This patch fixes the error by removing the respective include line from exynos-iommu.c.

Use of the __pa and __va macros is changed to virt_to_phys and phys_to_virt, which are recommended in driver code. printk formatting of physical addresses is also fixed to use %pa.

The System MMU driver is also changed to control only a single instance of a System MMU at a time. Since a single System MMU instance has only a single clock descriptor for its clock gating and a single address range for its control registers, there is no need to obtain two or more clock descriptors and ioremapped regions.

CC: Tomasz Figa <t.figa@samsung.com>
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
Signed-off-by: Shaik Ameer Basha <shaik.ameer@samsung.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
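As a reference for the two conventions adopted here, below is a minimal standalone sketch (not taken from this driver; the function name is illustrative) of the virt_to_phys()/phys_to_virt() pairing and of the %pa printk specifier, which takes a pointer to a phys_addr_t and prints it correctly whether the type is 32-bit or 64-bit (e.g. under LPAE):

#include <linux/kernel.h>
#include <linux/io.h>

/* Illustrative only: dump a page-table base the way this patch does. */
static void show_pgtable_base(void *pgtable_va)
{
	phys_addr_t base = virt_to_phys(pgtable_va);	/* lowmem VA -> PA */
	unsigned long *ent = phys_to_virt(base);	/* PA -> lowmem VA */

	/* %pa expects a pointer to the phys_addr_t, hence &base. */
	pr_info("page table base: %pa, first entry: 0x%lx\n", &base, *ent);
}

Unlike %#lx with __pa(), this does not truncate or mis-format on configurations where phys_addr_t is wider than unsigned long.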
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/exynos-iommu.c  255
1 file changed, 85 insertions(+), 170 deletions(-)
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 074018979cdf..8d7c3f9632f8 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -29,8 +29,6 @@
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 
-#include <mach/sysmmu.h>
-
 /* We does not consider super section mapping (16MB) */
 #define SECT_ORDER 20
 #define LPAGE_ORDER 16
@@ -108,7 +106,8 @@ static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
 
 static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
 {
-	return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
+	return (unsigned long *)phys_to_virt(
+				lv2table_base(sent)) + lv2ent_offset(iova);
 }
 
 enum exynos_sysmmu_inttype {
@@ -132,7 +131,7 @@ enum exynos_sysmmu_inttype {
  * translated. This is 0 if @itype is SYSMMU_BUSERROR.
  */
 typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
-			unsigned long pgtable_base, unsigned long fault_addr);
+			phys_addr_t pgtable_base, unsigned long fault_addr);
 
 static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
 	REG_PAGE_FAULT_ADDR,
@@ -170,14 +169,13 @@ struct sysmmu_drvdata {
 	struct device *sysmmu;	/* System MMU's device descriptor */
 	struct device *dev;	/* Owner of system MMU */
 	char *dbgname;
-	int nsfrs;
-	void __iomem **sfrbases;
-	struct clk *clk[2];
+	void __iomem *sfrbase;
+	struct clk *clk;
 	int activations;
 	rwlock_t lock;
 	struct iommu_domain *domain;
 	sysmmu_fault_handler_t fault_handler;
-	unsigned long pgtable;
+	phys_addr_t pgtable;
 };
 
 static bool set_sysmmu_active(struct sysmmu_drvdata *data)
@@ -266,17 +264,17 @@ void exynos_sysmmu_set_fault_handler(struct device *dev,
 }
 
 static int default_fault_handler(enum exynos_sysmmu_inttype itype,
-			unsigned long pgtable_base, unsigned long fault_addr)
+			phys_addr_t pgtable_base, unsigned long fault_addr)
 {
 	unsigned long *ent;
 
 	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
 		itype = SYSMMU_FAULT_UNKNOWN;
 
-	pr_err("%s occurred at 0x%lx(Page table base: 0x%lx)\n",
-			sysmmu_fault_name[itype], fault_addr, pgtable_base);
+	pr_err("%s occurred at 0x%lx(Page table base: %pa)\n",
+			sysmmu_fault_name[itype], fault_addr, &pgtable_base);
 
-	ent = section_entry(__va(pgtable_base), fault_addr);
+	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
 	pr_err("\tLv1 entry: 0x%lx\n", *ent);
 
 	if (lv1ent_page(ent)) {
@@ -295,56 +293,39 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
 {
 	/* SYSMMU is in blocked when interrupt occurred. */
 	struct sysmmu_drvdata *data = dev_id;
-	struct resource *irqres;
-	struct platform_device *pdev;
 	enum exynos_sysmmu_inttype itype;
 	unsigned long addr = -1;
-
-	int i, ret = -ENOSYS;
+	int ret = -ENOSYS;
 
 	read_lock(&data->lock);
 
 	WARN_ON(!is_sysmmu_active(data));
 
-	pdev = to_platform_device(data->sysmmu);
-	for (i = 0; i < (pdev->num_resources / 2); i++) {
-		irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
-		if (irqres && ((int)irqres->start == irq))
-			break;
-	}
-
-	if (i == pdev->num_resources) {
+	itype = (enum exynos_sysmmu_inttype)
+		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
+	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
 		itype = SYSMMU_FAULT_UNKNOWN;
-	} else {
-		itype = (enum exynos_sysmmu_inttype)
-			__ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
-		if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
-			itype = SYSMMU_FAULT_UNKNOWN;
-		else
-			addr = __raw_readl(
-				data->sfrbases[i] + fault_reg_offset[itype]);
-	}
+	else
+		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);
 
 	if (data->domain)
-		ret = report_iommu_fault(data->domain, data->dev,
-				addr, itype);
+		ret = report_iommu_fault(data->domain, data->dev, addr, itype);
 
 	if ((ret == -ENOSYS) && data->fault_handler) {
 		unsigned long base = data->pgtable;
 		if (itype != SYSMMU_FAULT_UNKNOWN)
-			base = __raw_readl(
-					data->sfrbases[i] + REG_PT_BASE_ADDR);
+			base = __raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
 		ret = data->fault_handler(itype, base, addr);
 	}
 
 	if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
-		__raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
+		__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);
 	else
 		dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
 			data->dbgname, sysmmu_fault_name[itype]);
 
 	if (itype != SYSMMU_FAULT_UNKNOWN)
-		sysmmu_unblock(data->sfrbases[i]);
+		sysmmu_unblock(data->sfrbase);
 
 	read_unlock(&data->lock);
 
@@ -355,20 +336,16 @@ static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
 {
 	unsigned long flags;
 	bool disabled = false;
-	int i;
 
 	write_lock_irqsave(&data->lock, flags);
 
 	if (!set_sysmmu_inactive(data))
 		goto finish;
 
-	for (i = 0; i < data->nsfrs; i++)
-		__raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);
+	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
 
-	if (data->clk[1])
-		clk_disable(data->clk[1]);
-	if (data->clk[0])
-		clk_disable(data->clk[0]);
+	if (!IS_ERR(data->clk))
+		clk_disable(data->clk);
 
 	disabled = true;
 	data->pgtable = 0;
@@ -394,7 +371,7 @@ finish:
 static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
 			unsigned long pgtable, struct iommu_domain *domain)
 {
-	int i, ret = 0;
+	int ret = 0;
 	unsigned long flags;
 
 	write_lock_irqsave(&data->lock, flags);
@@ -411,27 +388,22 @@ static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
 		goto finish;
 	}
 
-	if (data->clk[0])
-		clk_enable(data->clk[0]);
-	if (data->clk[1])
-		clk_enable(data->clk[1]);
+	if (!IS_ERR(data->clk))
+		clk_enable(data->clk);
 
 	data->pgtable = pgtable;
 
-	for (i = 0; i < data->nsfrs; i++) {
-		__sysmmu_set_ptbase(data->sfrbases[i], pgtable);
-
-		if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
-			/* System MMU version is 3.x */
-			__raw_writel((1 << 12) | (2 << 28),
-				data->sfrbases[i] + REG_MMU_CFG);
-			__sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
-			__sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
-		}
-
-		__raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
+	__sysmmu_set_ptbase(data->sfrbase, pgtable);
+	if ((readl(data->sfrbase + REG_MMU_VERSION) >> 28) == 3) {
+		/* System MMU version is 3.x */
+		__raw_writel((1 << 12) | (2 << 28),
+				data->sfrbase + REG_MMU_CFG);
+		__sysmmu_set_prefbuf(data->sfrbase, 0, -1, 0);
+		__sysmmu_set_prefbuf(data->sfrbase, 0, -1, 1);
 	}
 
+	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
+
 	data->domain = domain;
 
 	dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
@@ -458,7 +430,7 @@ int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
 	if (WARN_ON(ret < 0)) {
 		pm_runtime_put(data->sysmmu);
 		dev_err(data->sysmmu,
-			"(%s) Already enabled with page table %#lx\n",
+			"(%s) Already enabled with page table %#x\n",
 			data->dbgname, data->pgtable);
 	} else {
 		data->dev = dev;
@@ -486,13 +458,10 @@ static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
 	read_lock_irqsave(&data->lock, flags);
 
 	if (is_sysmmu_active(data)) {
-		int i;
-		for (i = 0; i < data->nsfrs; i++) {
-			if (sysmmu_block(data->sfrbases[i])) {
-				__sysmmu_tlb_invalidate_entry(
-						data->sfrbases[i], iova);
-				sysmmu_unblock(data->sfrbases[i]);
-			}
+		if (sysmmu_block(data->sfrbase)) {
+			__sysmmu_tlb_invalidate_entry(
+					data->sfrbase, iova);
+			sysmmu_unblock(data->sfrbase);
 		}
 	} else {
 		dev_dbg(data->sysmmu,
@@ -511,12 +480,9 @@ void exynos_sysmmu_tlb_invalidate(struct device *dev)
 	read_lock_irqsave(&data->lock, flags);
 
 	if (is_sysmmu_active(data)) {
-		int i;
-		for (i = 0; i < data->nsfrs; i++) {
-			if (sysmmu_block(data->sfrbases[i])) {
-				__sysmmu_tlb_invalidate(data->sfrbases[i]);
-				sysmmu_unblock(data->sfrbases[i]);
-			}
+		if (sysmmu_block(data->sfrbase)) {
+			__sysmmu_tlb_invalidate(data->sfrbase);
+			sysmmu_unblock(data->sfrbase);
 		}
 	} else {
 		dev_dbg(data->sysmmu,
@@ -529,11 +495,10 @@ void exynos_sysmmu_tlb_invalidate(struct device *dev)
 
 static int exynos_sysmmu_probe(struct platform_device *pdev)
 {
-	int i, ret;
-	struct device *dev;
+	int ret;
+	struct device *dev = &pdev->dev;
 	struct sysmmu_drvdata *data;
-
-	dev = &pdev->dev;
+	struct resource *res;
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data) {
@@ -542,82 +507,37 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
 		goto err_alloc;
 	}
 
-	ret = dev_set_drvdata(dev, data);
-	if (ret) {
-		dev_dbg(dev, "Unabled to initialize driver data\n");
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_dbg(dev, "Unable to find IOMEM region\n");
+		ret = -ENOENT;
 		goto err_init;
 	}
 
-	data->nsfrs = pdev->num_resources / 2;
-	data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
-								GFP_KERNEL);
-	if (data->sfrbases == NULL) {
-		dev_dbg(dev, "Not enough memory\n");
-		ret = -ENOMEM;
-		goto err_init;
+	data->sfrbase = ioremap(res->start, resource_size(res));
+	if (!data->sfrbase) {
+		dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n", res->start);
+		ret = -ENOENT;
+		goto err_res;
 	}
 
-	for (i = 0; i < data->nsfrs; i++) {
-		struct resource *res;
-		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
-		if (!res) {
-			dev_dbg(dev, "Unable to find IOMEM region\n");
-			ret = -ENOENT;
-			goto err_res;
-		}
-
-		data->sfrbases[i] = ioremap(res->start, resource_size(res));
-		if (!data->sfrbases[i]) {
-			dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
-							res->start);
-			ret = -ENOENT;
-			goto err_res;
-		}
+	ret = platform_get_irq(pdev, 0);
+	if (ret <= 0) {
+		dev_dbg(dev, "Unable to find IRQ resource\n");
+		goto err_irq;
 	}
 
-	for (i = 0; i < data->nsfrs; i++) {
-		ret = platform_get_irq(pdev, i);
-		if (ret <= 0) {
-			dev_dbg(dev, "Unable to find IRQ resource\n");
-			goto err_irq;
-		}
-
-		ret = request_irq(ret, exynos_sysmmu_irq, 0,
-					dev_name(dev), data);
-		if (ret) {
-			dev_dbg(dev, "Unabled to register interrupt handler\n");
-			goto err_irq;
-		}
+	ret = request_irq(ret, exynos_sysmmu_irq, 0,
+				dev_name(dev), data);
+	if (ret) {
+		dev_dbg(dev, "Unabled to register interrupt handler\n");
+		goto err_irq;
 	}
 
 	if (dev_get_platdata(dev)) {
-		char *deli, *beg;
-		struct sysmmu_platform_data *platdata = dev_get_platdata(dev);
-
-		beg = platdata->clockname;
-
-		for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
-			/* NOTHING */;
-
-		if (*deli == '\0')
-			deli = NULL;
-		else
-			*deli = '\0';
-
-		data->clk[0] = clk_get(dev, beg);
-		if (IS_ERR(data->clk[0])) {
-			data->clk[0] = NULL;
+		data->clk = clk_get(dev, "sysmmu");
+		if (IS_ERR(data->clk))
 			dev_dbg(dev, "No clock descriptor registered\n");
-		}
-
-		if (data->clk[0] && deli) {
-			*deli = ',';
-			data->clk[1] = clk_get(dev, deli + 1);
-			if (IS_ERR(data->clk[1]))
-				data->clk[1] = NULL;
-		}
-
-		data->dbgname = platdata->dbgname;
 	}
 
 	data->sysmmu = dev;
@@ -626,22 +546,17 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
 
 	__set_fault_handler(data, &default_fault_handler);
 
+	platform_set_drvdata(pdev, data);
+
 	if (dev->parent)
 		pm_runtime_enable(dev);
 
 	dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
 	return 0;
 err_irq:
-	while (i-- > 0) {
-		int irq;
-
-		irq = platform_get_irq(pdev, i);
-		free_irq(irq, data);
-	}
+	free_irq(platform_get_irq(pdev, 0), data);
 err_res:
-	while (data->nsfrs-- > 0)
-		iounmap(data->sfrbases[data->nsfrs]);
-	kfree(data->sfrbases);
+	iounmap(data->sfrbase);
 err_init:
 	kfree(data);
 err_alloc:
@@ -722,7 +637,7 @@ static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
 
 	for (i = 0; i < NUM_LV1ENTRIES; i++)
 		if (lv1ent_page(priv->pgtable + i))
-			kfree(__va(lv2table_base(priv->pgtable + i)));
+			kfree(phys_to_virt(lv2table_base(priv->pgtable + i)));
 
 	free_pages((unsigned long)priv->pgtable, 2);
 	free_pages((unsigned long)priv->lv2entcnt, 1);
@@ -735,6 +650,7 @@ static int exynos_iommu_attach_device(struct iommu_domain *domain,
 {
 	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
 	struct exynos_iommu_domain *priv = domain->priv;
+	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
 	unsigned long flags;
 	int ret;
 
@@ -746,7 +662,7 @@ static int exynos_iommu_attach_device(struct iommu_domain *domain,
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);
+	ret = __exynos_sysmmu_enable(data, pagetable, domain);
 
 	if (ret == 0) {
 		/* 'data->node' must not be appeared in priv->clients */
@@ -758,17 +674,15 @@ static int exynos_iommu_attach_device(struct iommu_domain *domain,
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	if (ret < 0) {
-		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
-			__func__, __pa(priv->pgtable));
+		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
+			__func__, &pagetable);
 		pm_runtime_put(data->sysmmu);
-	} else if (ret > 0) {
-		dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
-			__func__, __pa(priv->pgtable));
-	} else {
-		dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
-			__func__, __pa(priv->pgtable));
+		return ret;
 	}
 
+	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
+		__func__, &pagetable, (ret == 0) ? "" : ", again");
+
 	return ret;
 }
 
@@ -778,6 +692,7 @@ static void exynos_iommu_detach_device(struct iommu_domain *domain,
 	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
 	struct exynos_iommu_domain *priv = domain->priv;
 	struct list_head *pos;
+	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
 	unsigned long flags;
 	bool found = false;
 
@@ -794,13 +709,13 @@ static void exynos_iommu_detach_device(struct iommu_domain *domain,
 		goto finish;
 
 	if (__exynos_sysmmu_disable(data)) {
-		dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
-			__func__, __pa(priv->pgtable));
+		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
+			__func__, &pagetable);
 		list_del_init(&data->node);
 
 	} else {
-		dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
-			__func__, __pa(priv->pgtable));
+		dev_dbg(dev, "%s: Detaching IOMMU with pgtable %pa delayed",
+			__func__, &pagetable);
 	}
 
 finish:
@@ -821,7 +736,7 @@ static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
 		if (!pent)
 			return NULL;
 
-		*sent = mk_lv1ent_page(__pa(pent));
+		*sent = mk_lv1ent_page(virt_to_phys(pent));
 		*pgcounter = NUM_LV2ENTRIES;
 		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
 		pgtable_flush(sent, sent + 1);