aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLaurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>2014-05-15 06:40:51 -0400
committerJoerg Roedel <jroedel@suse.de>2014-05-26 05:22:26 -0400
commit004c5b32fa5e1d0b31d9b4b77160504e416a0ceb (patch)
treeffa15cd2c83c7b29c6f134f3fcef5aa671ae1ad7
parentbec0ca0333d7030ef5b9afa7e4fd95b25d3ec338 (diff)
iommu/ipmmu-vmsa: Support clearing mappings
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--	drivers/iommu/ipmmu-vmsa.c	190
1 file changed, 186 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 76f2550a7226..95b819a1be6d 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -192,14 +192,22 @@ static LIST_HEAD(ipmmu_devices);
 #define ARM_VMSA_PTE_SH_NS		(((pteval_t)0) << 8)
 #define ARM_VMSA_PTE_SH_OS		(((pteval_t)2) << 8)
 #define ARM_VMSA_PTE_SH_IS		(((pteval_t)3) << 8)
+#define ARM_VMSA_PTE_SH_MASK		(((pteval_t)3) << 8)
 #define ARM_VMSA_PTE_NS			(((pteval_t)1) << 5)
 #define ARM_VMSA_PTE_PAGE		(((pteval_t)3) << 0)
 
 /* Stage-1 PTE */
+#define ARM_VMSA_PTE_nG			(((pteval_t)1) << 11)
 #define ARM_VMSA_PTE_AP_UNPRIV		(((pteval_t)1) << 6)
 #define ARM_VMSA_PTE_AP_RDONLY		(((pteval_t)2) << 6)
+#define ARM_VMSA_PTE_AP_MASK		(((pteval_t)3) << 6)
+#define ARM_VMSA_PTE_ATTRINDX_MASK	(((pteval_t)3) << 2)
 #define ARM_VMSA_PTE_ATTRINDX_SHIFT	2
-#define ARM_VMSA_PTE_nG			(((pteval_t)1) << 11)
+
+#define ARM_VMSA_PTE_ATTRS_MASK \
+	(ARM_VMSA_PTE_XN | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_nG | \
+	 ARM_VMSA_PTE_AF | ARM_VMSA_PTE_SH_MASK | ARM_VMSA_PTE_AP_MASK | \
+	 ARM_VMSA_PTE_NS | ARM_VMSA_PTE_ATTRINDX_MASK)
 
 #define ARM_VMSA_PTE_CONT_ENTRIES	16
 #define ARM_VMSA_PTE_CONT_SIZE		(PAGE_SIZE * ARM_VMSA_PTE_CONT_ENTRIES)
@@ -614,7 +622,7 @@ static int ipmmu_alloc_init_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
614 return 0; 622 return 0;
615} 623}
616 624
617static int ipmmu_handle_mapping(struct ipmmu_vmsa_domain *domain, 625static int ipmmu_create_mapping(struct ipmmu_vmsa_domain *domain,
618 unsigned long iova, phys_addr_t paddr, 626 unsigned long iova, phys_addr_t paddr,
619 size_t size, int prot) 627 size_t size, int prot)
620{ 628{
@@ -668,6 +676,180 @@ done:
668 return ret; 676 return ret;
669} 677}
670 678
679static void ipmmu_clear_pud(struct ipmmu_vmsa_device *mmu, pud_t *pud)
680{
681 /* Free the page table. */
682 pgtable_t table = pud_pgtable(*pud);
683 __free_page(table);
684
685 /* Clear the PUD. */
686 *pud = __pud(0);
687 ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
688}
689
690static void ipmmu_clear_pmd(struct ipmmu_vmsa_device *mmu, pud_t *pud,
691 pmd_t *pmd)
692{
693 unsigned int i;
694
695 /* Free the page table. */
696 if (pmd_table(*pmd)) {
697 pgtable_t table = pmd_pgtable(*pmd);
698 __free_page(table);
699 }
700
701 /* Clear the PMD. */
702 *pmd = __pmd(0);
703 ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
704
705 /* Check whether the PUD is still needed. */
706 pmd = pmd_offset(pud, 0);
707 for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) {
708 if (!pmd_none(pmd[i]))
709 return;
710 }
711
712 /* Clear the parent PUD. */
713 ipmmu_clear_pud(mmu, pud);
714}
715
716static void ipmmu_clear_pte(struct ipmmu_vmsa_device *mmu, pud_t *pud,
717 pmd_t *pmd, pte_t *pte, unsigned int num_ptes)
718{
719 unsigned int i;
720
721 /* Clear the PTE. */
722 for (i = num_ptes; i; --i)
723 pte[i-1] = __pte(0);
724
725 ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * num_ptes);
726
727 /* Check whether the PMD is still needed. */
728 pte = pte_offset_kernel(pmd, 0);
729 for (i = 0; i < IPMMU_PTRS_PER_PTE; ++i) {
730 if (!pte_none(pte[i]))
731 return;
732 }
733
734 /* Clear the parent PMD. */
735 ipmmu_clear_pmd(mmu, pud, pmd);
736}
737
738static int ipmmu_split_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd)
739{
740 pte_t *pte, *start;
741 pteval_t pteval;
742 unsigned long pfn;
743 unsigned int i;
744
745 pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
746 if (!pte)
747 return -ENOMEM;
748
749 /* Copy the PMD attributes. */
750 pteval = (pmd_val(*pmd) & ARM_VMSA_PTE_ATTRS_MASK)
751 | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_PAGE;
752
753 pfn = pmd_pfn(*pmd);
754 start = pte;
755
756 for (i = IPMMU_PTRS_PER_PTE; i; --i)
757 *pte++ = pfn_pte(pfn++, __pgprot(pteval));
758
759 ipmmu_flush_pgtable(mmu, start, PAGE_SIZE);
760 *pmd = __pmd(__pa(start) | PMD_NSTABLE | PMD_TYPE_TABLE);
761 ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
762
763 return 0;
764}
765
766static void ipmmu_split_pte(struct ipmmu_vmsa_device *mmu, pte_t *pte)
767{
768 unsigned int i;
769
770 for (i = ARM_VMSA_PTE_CONT_ENTRIES; i; --i)
771 pte[i-1] = __pte(pte_val(*pte) & ~ARM_VMSA_PTE_CONT);
772
773 ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * ARM_VMSA_PTE_CONT_ENTRIES);
774}
775
776static int ipmmu_clear_mapping(struct ipmmu_vmsa_domain *domain,
777 unsigned long iova, size_t size)
778{
779 struct ipmmu_vmsa_device *mmu = domain->mmu;
780 unsigned long flags;
781 pgd_t *pgd = domain->pgd;
782 pud_t *pud;
783 pmd_t *pmd;
784 pte_t *pte;
785 int ret = 0;
786
787 if (!pgd)
788 return -EINVAL;
789
790 if (size & ~PAGE_MASK)
791 return -EINVAL;
792
793 pgd += pgd_index(iova);
794 pud = (pud_t *)pgd;
795
796 spin_lock_irqsave(&domain->lock, flags);
797
798 /* If there's no PUD or PMD we're done. */
799 if (pud_none(*pud))
800 goto done;
801
802 pmd = pmd_offset(pud, iova);
803 if (pmd_none(*pmd))
804 goto done;
805
806 /*
807 * When freeing a 2MB block just clear the PMD. In the unlikely case the
808 * block is mapped as individual pages this will free the corresponding
809 * PTE page table.
810 */
811 if (size == SZ_2M) {
812 ipmmu_clear_pmd(mmu, pud, pmd);
813 goto done;
814 }
815
816 /*
817 * If the PMD has been mapped as a section remap it as pages to allow
818 * freeing individual pages.
819 */
820 if (pmd_sect(*pmd))
821 ipmmu_split_pmd(mmu, pmd);
822
823 pte = pte_offset_kernel(pmd, iova);
824
825 /*
826 * When freeing a 64kB block just clear the PTE entries. We don't have
827 * to care about the contiguous hint of the surrounding entries.
828 */
829 if (size == SZ_64K) {
830 ipmmu_clear_pte(mmu, pud, pmd, pte, ARM_VMSA_PTE_CONT_ENTRIES);
831 goto done;
832 }
833
834 /*
835 * If the PTE has been mapped with the contiguous hint set remap it and
836 * its surrounding PTEs to allow unmapping a single page.
837 */
838 if (pte_val(*pte) & ARM_VMSA_PTE_CONT)
839 ipmmu_split_pte(mmu, pte);
840
841 /* Clear the PTE. */
842 ipmmu_clear_pte(mmu, pud, pmd, pte, 1);
843
844done:
845 spin_unlock_irqrestore(&domain->lock, flags);
846
847 if (ret)
848 ipmmu_tlb_invalidate(domain);
849
850 return 0;
851}
852
671/* ----------------------------------------------------------------------------- 853/* -----------------------------------------------------------------------------
672 * IOMMU Operations 854 * IOMMU Operations
673 */ 855 */
@@ -768,7 +950,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
768 if (!domain) 950 if (!domain)
769 return -ENODEV; 951 return -ENODEV;
770 952
771 return ipmmu_handle_mapping(domain, iova, paddr, size, prot); 953 return ipmmu_create_mapping(domain, iova, paddr, size, prot);
772} 954}
773 955
774static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, 956static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
@@ -777,7 +959,7 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
777 struct ipmmu_vmsa_domain *domain = io_domain->priv; 959 struct ipmmu_vmsa_domain *domain = io_domain->priv;
778 int ret; 960 int ret;
779 961
780 ret = ipmmu_handle_mapping(domain, iova, 0, size, 0); 962 ret = ipmmu_clear_mapping(domain, iova, size);
781 return ret ? 0 : size; 963 return ret ? 0 : size;
782} 964}
783 965