author     Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>  2014-05-15 06:40:48 -0400
committer  Joerg Roedel <jroedel@suse.de>                                2014-05-26 05:22:24 -0400
commit     9009f256596da78567d63c434691f7e409a99400 (patch)
tree       cde6fd9b341da688a5c0a3d22c5255ef116d6293
parent     14e5123ee9bb0d91ceb451d0231b52f8c04af99d (diff)
iommu/ipmmu-vmsa: Rewrite page table management
The IOMMU core will only call us with page sizes advertized as supported
by the driver. We can thus simplify the code by removing loops over PGD
and PMD entries.

Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c  |  193
1 file changed, 86 insertions, 107 deletions
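
The simplification rests on the IOMMU core splitting every map request
according to the driver's pgsize_bitmap before calling into the driver, so
each call covers exactly one advertised page size. The stand-alone sketch
below illustrates that splitting behaviour for the sizes this driver now
advertises; it is not the core's actual code, and split_map() and
pick_pgsize() are made-up names used only for illustration.

/*
 * Stand-alone sketch (not the IOMMU core's actual code) of how a map
 * request is split according to the driver's pgsize_bitmap before the
 * driver's map callback is invoked.  split_map() and pick_pgsize() are
 * made-up names; the request is assumed to be at least 4 KiB aligned.
 */
#include <stdio.h>
#include <stddef.h>

#define SZ_4K  0x00001000UL
#define SZ_64K 0x00010000UL

/* Pick the largest advertised size that fits the current alignment/length. */
static unsigned long pick_pgsize(unsigned long pgsize_bitmap,
                                 unsigned long iova, unsigned long paddr,
                                 size_t size)
{
        if ((pgsize_bitmap & SZ_64K) &&
            !((iova | paddr) & (SZ_64K - 1)) && size >= SZ_64K)
                return SZ_64K;

        return SZ_4K;
}

static void split_map(unsigned long pgsize_bitmap, unsigned long iova,
                      unsigned long paddr, size_t size)
{
        while (size) {
                unsigned long pgsize = pick_pgsize(pgsize_bitmap, iova,
                                                   paddr, size);

                /* This is where the driver's map callback would run. */
                printf("map iova 0x%08lx -> pa 0x%08lx, size 0x%lx\n",
                       iova, paddr, pgsize);

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }
}

int main(void)
{
        /*
         * With pgsize_bitmap = SZ_64K | SZ_4K, a 2 MiB request becomes
         * 32 calls of 64 KiB instead of a single 2 MiB call.
         */
        split_map(SZ_64K | SZ_4K, 0x10000000UL, 0x40000000UL, 0x200000);
        return 0;
}
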
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 1201aface539..87703c3faf58 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -517,118 +517,97 @@ static void ipmmu_free_pgtables(struct ipmmu_vmsa_domain *domain)
  * functions as they would flush the CPU TLB.
  */
 
-static int ipmmu_alloc_init_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
-                                unsigned long addr, unsigned long end,
-                                phys_addr_t phys, int prot)
+static pte_t *ipmmu_alloc_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
+                              unsigned long iova)
 {
-        unsigned long pfn = __phys_to_pfn(phys);
-        pteval_t pteval = ARM_VMSA_PTE_PAGE | ARM_VMSA_PTE_NS | ARM_VMSA_PTE_AF
-                        | ARM_VMSA_PTE_XN;
-        pte_t *pte, *start;
+        pte_t *pte;
 
-        if (pmd_none(*pmd)) {
-                /* Allocate a new set of tables */
-                pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
-                if (!pte)
-                        return -ENOMEM;
+        if (!pmd_none(*pmd))
+                return pte_offset_kernel(pmd, iova);
 
-                ipmmu_flush_pgtable(mmu, pte, PAGE_SIZE);
-                *pmd = __pmd(__pa(pte) | PMD_NSTABLE | PMD_TYPE_TABLE);
-                ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
+        pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
+        if (!pte)
+                return NULL;
 
-                pte += pte_index(addr);
-        } else
-                pte = pte_offset_kernel(pmd, addr);
+        ipmmu_flush_pgtable(mmu, pte, PAGE_SIZE);
+        *pmd = __pmd(__pa(pte) | PMD_NSTABLE | PMD_TYPE_TABLE);
+        ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
 
-        pteval |= ARM_VMSA_PTE_AP_UNPRIV | ARM_VMSA_PTE_nG;
-        if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
-                pteval |= ARM_VMSA_PTE_AP_RDONLY;
+        return pte + pte_index(iova);
+}
 
-        if (prot & IOMMU_CACHE)
-                pteval |= (IMMAIR_ATTR_IDX_WBRWA <<
-                           ARM_VMSA_PTE_ATTRINDX_SHIFT);
+static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd,
+                              unsigned long iova)
+{
+        pud_t *pud = (pud_t *)pgd;
+        pmd_t *pmd;
 
-        /* If no access, create a faulting entry to avoid TLB fills */
-        if (prot & IOMMU_EXEC)
-                pteval &= ~ARM_VMSA_PTE_XN;
-        else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
-                pteval &= ~ARM_VMSA_PTE_PAGE;
+        if (!pud_none(*pud))
+                return pmd_offset(pud, iova);
 
-        pteval |= ARM_VMSA_PTE_SH_IS;
-        start = pte;
+        pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
+        if (!pmd)
+                return NULL;
 
-        /*
-         * Install the page table entries.
-         *
-         * Set the contiguous hint in the PTEs where possible. The hint
-         * indicates a series of ARM_VMSA_PTE_CONT_ENTRIES PTEs mapping a
-         * physically contiguous region with the following constraints:
-         *
-         * - The region start is aligned to ARM_VMSA_PTE_CONT_SIZE
-         * - Each PTE in the region has the contiguous hint bit set
-         *
-         * We don't support partial unmapping so there's no need to care about
-         * clearing the contiguous hint from neighbour PTEs.
-         */
-        do {
-                unsigned long chunk_end;
+        ipmmu_flush_pgtable(mmu, pmd, PAGE_SIZE);
+        *pud = __pud(__pa(pmd) | PMD_NSTABLE | PMD_TYPE_TABLE);
+        ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
 
-                /*
-                 * If the address is aligned to a contiguous region size and the
-                 * mapping size is large enough, process the largest possible
-                 * number of PTEs multiple of ARM_VMSA_PTE_CONT_ENTRIES.
-                 * Otherwise process the smallest number of PTEs to align the
-                 * address to a contiguous region size or to complete the
-                 * mapping.
-                 */
-                if (IS_ALIGNED(addr, ARM_VMSA_PTE_CONT_SIZE) &&
-                    end - addr >= ARM_VMSA_PTE_CONT_SIZE) {
-                        chunk_end = round_down(end, ARM_VMSA_PTE_CONT_SIZE);
-                        pteval |= ARM_VMSA_PTE_CONT;
-                } else {
-                        chunk_end = min(ALIGN(addr, ARM_VMSA_PTE_CONT_SIZE),
-                                        end);
-                        pteval &= ~ARM_VMSA_PTE_CONT;
-                }
+        return pmd + pmd_index(iova);
+}
 
-                do {
-                        *pte++ = pfn_pte(pfn++, __pgprot(pteval));
-                        addr += PAGE_SIZE;
-                } while (addr != chunk_end);
-        } while (addr != end);
+static u64 ipmmu_page_prot(unsigned int prot, u64 type)
+{
+        u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
+                   | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV
+                   | ARM_VMSA_PTE_NS | type;
 
-        ipmmu_flush_pgtable(mmu, start, sizeof(*pte) * (pte - start));
-        return 0;
+        if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
+                pgprot |= ARM_VMSA_PTE_AP_RDONLY;
+
+        if (prot & IOMMU_CACHE)
+                pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT;
+
+        if (prot & IOMMU_EXEC)
+                pgprot &= ~ARM_VMSA_PTE_XN;
+        else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+                /* If no access create a faulting entry to avoid TLB fills. */
+                pgprot &= ~ARM_VMSA_PTE_PAGE;
+
+        return pgprot;
 }
 
-static int ipmmu_alloc_init_pmd(struct ipmmu_vmsa_device *mmu, pud_t *pud,
-                                unsigned long addr, unsigned long end,
-                                phys_addr_t phys, int prot)
+static int ipmmu_alloc_init_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
+                                unsigned long iova, unsigned long pfn,
+                                size_t size, int prot)
 {
-        unsigned long next;
-        pmd_t *pmd;
-        int ret;
+        pteval_t pteval = ipmmu_page_prot(prot, ARM_VMSA_PTE_PAGE);
+        unsigned int num_ptes = 1;
+        pte_t *pte, *start;
+        unsigned int i;
 
-        if (pud_none(*pud)) {
-                pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
-                if (!pmd)
-                        return -ENOMEM;
+        pte = ipmmu_alloc_pte(mmu, pmd, iova);
+        if (!pte)
+                return -ENOMEM;
+
+        start = pte;
 
-                ipmmu_flush_pgtable(mmu, pmd, PAGE_SIZE);
-                *pud = __pud(__pa(pmd) | PMD_NSTABLE | PMD_TYPE_TABLE);
-                ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
+        /*
+         * Install the page table entries. We can be called both for a single
+         * page or for a block of 16 physically contiguous pages. In the latter
+         * case set the PTE contiguous hint.
+         */
+        if (size == SZ_64K) {
+                pteval |= ARM_VMSA_PTE_CONT;
+                num_ptes = ARM_VMSA_PTE_CONT_ENTRIES;
+        }
 
-                pmd += pmd_index(addr);
-        } else
-                pmd = pmd_offset(pud, addr);
+        for (i = num_ptes; i; --i)
+                *pte++ = pfn_pte(pfn++, __pgprot(pteval));
 
-        do {
-                next = pmd_addr_end(addr, end);
-                ret = ipmmu_alloc_init_pte(mmu, pmd, addr, end, phys, prot);
-                phys += next - addr;
-        } while (pmd++, addr = next, addr < end);
+        ipmmu_flush_pgtable(mmu, start, sizeof(*pte) * num_ptes);
 
-        return ret;
+        return 0;
 }
 
 static int ipmmu_handle_mapping(struct ipmmu_vmsa_domain *domain,
@@ -638,7 +617,8 @@ static int ipmmu_handle_mapping(struct ipmmu_vmsa_domain *domain,
         struct ipmmu_vmsa_device *mmu = domain->mmu;
         pgd_t *pgd = domain->pgd;
         unsigned long flags;
-        unsigned long end;
+        unsigned long pfn;
+        pmd_t *pmd;
         int ret;
 
         if (!pgd)
@@ -650,26 +630,25 @@ static int ipmmu_handle_mapping(struct ipmmu_vmsa_domain *domain,
         if (paddr & ~((1ULL << 40) - 1))
                 return -ERANGE;
 
-        spin_lock_irqsave(&domain->lock, flags);
-
+        pfn = __phys_to_pfn(paddr);
         pgd += pgd_index(iova);
-        end = iova + size;
 
-        do {
-                unsigned long next = pgd_addr_end(iova, end);
+        /* Update the page tables. */
+        spin_lock_irqsave(&domain->lock, flags);
 
-                ret = ipmmu_alloc_init_pmd(mmu, (pud_t *)pgd, iova, next, paddr,
-                                           prot);
-                if (ret)
-                        break;
+        pmd = ipmmu_alloc_pmd(mmu, pgd, iova);
+        if (!pmd) {
+                ret = -ENOMEM;
+                goto done;
+        }
 
-                paddr += next - iova;
-                iova = next;
-        } while (pgd++, iova != end);
+        ret = ipmmu_alloc_init_pte(mmu, pmd, iova, pfn, size, prot);
 
+done:
         spin_unlock_irqrestore(&domain->lock, flags);
 
-        ipmmu_tlb_invalidate(domain);
+        if (!ret)
+                ipmmu_tlb_invalidate(domain);
 
         return ret;
 }
@@ -951,7 +930,7 @@ static struct iommu_ops ipmmu_ops = {
         .iova_to_phys = ipmmu_iova_to_phys,
         .add_device = ipmmu_add_device,
         .remove_device = ipmmu_remove_device,
-        .pgsize_bitmap = SZ_2M | SZ_64K | SZ_4K,
+        .pgsize_bitmap = SZ_64K | SZ_4K,
 };
 
 /* -----------------------------------------------------------------------------
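
The last hunk drops SZ_2M from pgsize_bitmap, leaving only the two sizes the
rewritten ipmmu_alloc_init_pte() handles: a single 4 KiB page or one block of
16 physically contiguous pages with the PTE contiguous hint set. A tiny
stand-alone check of that arithmetic, assuming the 4 KiB granule values used
by the driver (ARM_VMSA_PTE_CONT_ENTRIES == 16):

/*
 * Stand-alone sketch of the contiguous-hint arithmetic assumed by
 * ipmmu_alloc_init_pte(): a 64 KiB map call covers exactly one block of
 * sixteen 4 KiB PTEs, so the driver can set num_ptes to
 * ARM_VMSA_PTE_CONT_ENTRIES without any alignment or remainder handling.
 * The constants below assume a 4 KiB granule.
 */
#include <assert.h>

#define SZ_4K                     0x1000UL
#define SZ_64K                    0x10000UL
#define ARM_VMSA_PTE_CONT_ENTRIES 16
#define ARM_VMSA_PTE_CONT_SIZE    (SZ_4K * ARM_VMSA_PTE_CONT_ENTRIES)

int main(void)
{
        /* One contiguous block is exactly the 64 KiB page size advertised. */
        assert(ARM_VMSA_PTE_CONT_SIZE == SZ_64K);
        assert(SZ_64K / SZ_4K == ARM_VMSA_PTE_CONT_ENTRIES);
        return 0;
}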