author	Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>	2014-05-15 06:40:47 -0400
committer	Joerg Roedel <jroedel@suse.de>	2014-05-26 05:22:24 -0400
commit	14e5123ee9bb0d91ceb451d0231b52f8c04af99d (patch)
tree	5ce5f30ef33b71ba320564f664a56c880d95a8f0
parent	4ee3cc9c4a62659bc5f5ed59ea49a0b98b5ac670 (diff)

iommu/ipmmu-vmsa: PMD is never folded, PUD always is

The driver only supports the 3-level long descriptor format that has no
PUD and always has a PMD.

Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--	drivers/iommu/ipmmu-vmsa.c	66
1 file changed, 9 insertions, 57 deletions
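For context: in the IPMMU's 3-level long-descriptor format there is no separate PUD table, so a PGD entry is reused directly as the PUD entry. That is why the patch below can delete the whole PUD level (ipmmu_alloc_init_pud()/ipmmu_free_puds()) and hand (pud_t *)pgd straight to the PMD helpers. A minimal sketch of what a folded-PUD accessor looks like, paraphrasing the generic asm-generic/pgtable-nopud.h behaviour rather than quoting code from this patch:

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	/* No PUD level: the PGD entry itself serves as the PUD entry. */
	return (pud_t *)pgd;
}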
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index e64c616208e2..1201aface539 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -215,7 +215,6 @@ static LIST_HEAD(ipmmu_devices);
 #define IPMMU_PTRS_PER_PTE	512
 #define IPMMU_PTRS_PER_PMD	512
 #define IPMMU_PTRS_PER_PGD	4
-#define IPMMU_PTRS_PER_PUD	1
 
 /* -----------------------------------------------------------------------------
  * Read/Write Access
@@ -465,6 +464,8 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
  * Page Table Management
  */
 
+#define pud_pgtable(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
+
 static void ipmmu_free_ptes(pmd_t *pmd)
 {
 	pgtable_t table = pmd_pgtable(*pmd);
@@ -473,10 +474,10 @@ static void ipmmu_free_ptes(pmd_t *pmd)
 
 static void ipmmu_free_pmds(pud_t *pud)
 {
-	pmd_t *pmd, *pmd_base = pmd_offset(pud, 0);
+	pmd_t *pmd = pmd_offset(pud, 0);
+	pgtable_t table;
 	unsigned int i;
 
-	pmd = pmd_base;
 	for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) {
 		if (pmd_none(*pmd))
 			continue;
@@ -485,24 +486,8 @@ static void ipmmu_free_pmds(pud_t *pud)
 		pmd++;
 	}
 
-	pmd_free(NULL, pmd_base);
-}
-
-static void ipmmu_free_puds(pgd_t *pgd)
-{
-	pud_t *pud, *pud_base = pud_offset(pgd, 0);
-	unsigned int i;
-
-	pud = pud_base;
-	for (i = 0; i < IPMMU_PTRS_PER_PUD; ++i) {
-		if (pud_none(*pud))
-			continue;
-
-		ipmmu_free_pmds(pud);
-		pud++;
-	}
-
-	pud_free(NULL, pud_base);
+	table = pud_pgtable(*pud);
+	__free_page(table);
 }
 
 static void ipmmu_free_pgtables(struct ipmmu_vmsa_domain *domain)
@@ -520,7 +505,7 @@ static void ipmmu_free_pgtables(struct ipmmu_vmsa_domain *domain)
 	for (i = 0; i < IPMMU_PTRS_PER_PGD; ++i) {
 		if (pgd_none(*pgd))
 			continue;
-		ipmmu_free_puds(pgd);
+		ipmmu_free_pmds((pud_t *)pgd);
 		pgd++;
 	}
 
@@ -624,7 +609,6 @@ static int ipmmu_alloc_init_pmd(struct ipmmu_vmsa_device *mmu, pud_t *pud,
 	pmd_t *pmd;
 	int ret;
 
-#ifndef __PAGETABLE_PMD_FOLDED
 	if (pud_none(*pud)) {
 		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
 		if (!pmd)
@@ -636,7 +620,6 @@ static int ipmmu_alloc_init_pmd(struct ipmmu_vmsa_device *mmu, pud_t *pud,
 
 		pmd += pmd_index(addr);
 	} else
-#endif
 		pmd = pmd_offset(pud, addr);
 
 	do {
@@ -648,38 +631,6 @@ static int ipmmu_alloc_init_pmd(struct ipmmu_vmsa_device *mmu, pud_t *pud,
 	return ret;
 }
 
-static int ipmmu_alloc_init_pud(struct ipmmu_vmsa_device *mmu, pgd_t *pgd,
-				unsigned long addr, unsigned long end,
-				phys_addr_t phys, int prot)
-{
-	unsigned long next;
-	pud_t *pud;
-	int ret;
-
-#ifndef __PAGETABLE_PUD_FOLDED
-	if (pgd_none(*pgd)) {
-		pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
-		if (!pud)
-			return -ENOMEM;
-
-		ipmmu_flush_pgtable(mmu, pud, PAGE_SIZE);
-		*pgd = __pgd(__pa(pud) | PMD_NSTABLE | PMD_TYPE_TABLE);
-		ipmmu_flush_pgtable(mmu, pgd, sizeof(*pgd));
-
-		pud += pud_index(addr);
-	} else
-#endif
-		pud = pud_offset(pgd, addr);
-
-	do {
-		next = pud_addr_end(addr, end);
-		ret = ipmmu_alloc_init_pmd(mmu, pud, addr, next, phys, prot);
-		phys += next - addr;
-	} while (pud++, addr = next, addr < end);
-
-	return ret;
-}
-
 static int ipmmu_handle_mapping(struct ipmmu_vmsa_domain *domain,
 				unsigned long iova, phys_addr_t paddr,
 				size_t size, int prot)
@@ -707,7 +658,8 @@ static int ipmmu_handle_mapping(struct ipmmu_vmsa_domain *domain,
 	do {
 		unsigned long next = pgd_addr_end(iova, end);
 
-		ret = ipmmu_alloc_init_pud(mmu, pgd, iova, next, paddr, prot);
+		ret = ipmmu_alloc_init_pmd(mmu, (pud_t *)pgd, iova, next, paddr,
+					   prot);
 		if (ret)
 			break;
 
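A note on the new pud_pgtable() helper added in the second hunk above: it recovers the struct page of the PMD table that ipmmu_alloc_init_pmd() allocated with get_zeroed_page(), so ipmmu_free_pmds() can release it with __free_page() instead of the config-dependent pmd_free(). The sketch below expands the macro into an equivalent function to spell out the steps; the function name is hypothetical, while PHYS_MASK, __phys_to_pfn() and pfn_to_page() are the same kernel helpers the macro itself uses.

/* Illustrative only, not part of the patch. */
static inline pgtable_t ipmmu_pud_to_table(pud_t pud)
{
	phys_addr_t phys = pud_val(pud) & PHYS_MASK;	/* strip the LPAE attribute bits */
	unsigned long pfn = __phys_to_pfn(phys);	/* physical address -> page frame number */

	return pfn_to_page(pfn);			/* page frame number -> struct page * */
}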