author    Alex Williamson <alex.williamson@redhat.com>  2013-06-17 21:57:34 -0400
committer Joerg Roedel <joro@8bytes.org>                2013-06-20 11:26:25 -0400
commit    bd13969b952491149e641d3dab24fa59b98f82e9 (patch)
tree      0b4ce920b7c668ed94d5c4f91dfe0e654335af30 /drivers/iommu
parent    7d132055814ef17a6c7b69f342244c410a5e000f (diff)
iommu: Split iommu_unmaps
iommu_map splits requests into pages that the iommu driver reports that it
can handle. The iommu_unmap path does not do the same. This can cause
problems not only from callers that might expect the same behavior as the
map path, but even from the failure path of iommu_map, should it fail at a
point where it has mapped and needs to unwind a set of pages that the iommu
driver cannot handle directly. amd_iommu, for example, will BUG_ON if asked
to unmap a non power of 2 size.

Fix this by extracting and generalizing the sizing code from the iommu_map
path and using it for both map and unmap.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Joerg Roedel <joro@8bytes.org>
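To see what the factored-out sizing helper computes, the following standalone
sketch reproduces its logic in userspace. It is an illustration, not code from
the patch: __builtin_clzl()/__builtin_ctzl() stand in for the kernel's
__fls()/__ffs(), the 4KiB|2MiB pgsize_bitmap is a made-up example, and 64-bit
longs are assumed.

/*
 * Standalone sketch (not from the patch): pick_pgsize() mirrors the new
 * iommu_pgsize() helper.  GCC builtins stand in for the kernel's
 * __fls()/__ffs(), PGSIZE_BITMAP (4KiB | 2MiB) is a hypothetical
 * example, and 64-bit longs are assumed.
 */
#include <stdio.h>
#include <stddef.h>

#define PGSIZE_BITMAP	((1UL << 12) | (1UL << 21))	/* 4KiB and 2MiB */

static size_t pick_pgsize(unsigned long addr_merge, size_t size)
{
	/* Max page size that still fits into 'size' (__fls equivalent) */
	unsigned int pgsize_idx = 63 - __builtin_clzl(size);
	unsigned long pgsize;

	if (addr_merge) {
		/* Max page size the address alignment allows (__ffs) */
		unsigned int align_idx = __builtin_ctzl(addr_merge);

		if (align_idx < pgsize_idx)
			pgsize_idx = align_idx;
	}

	/* Mask of sizes <= 2^pgsize_idx, restricted to supported sizes */
	pgsize = ((1UL << (pgsize_idx + 1)) - 1) & PGSIZE_BITMAP;

	/* Pick the biggest remaining size; the kernel BUG_ONs if none */
	return 1UL << (63 - __builtin_clzl(pgsize));
}

int main(void)
{
	unsigned long iova = 0x200000;	/* 2MiB-aligned */
	size_t size = 0x201000;		/* 2MiB + 4KiB: not a power of 2 */

	/* Walk the range the way the fixed iommu_unmap() loop now does */
	while (size) {
		size_t pgsize = pick_pgsize(iova, size);

		printf("chunk: iova 0x%lx pgsize 0x%zx\n", iova, pgsize);
		iova += pgsize;
		size -= pgsize;
	}
	return 0;
}

Run as written, it prints one 0x200000 chunk followed by one 0x1000 chunk;
before this patch, iommu_unmap would instead have handed the whole 0x201000
straight to the driver.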
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/iommu.c | 63
1 file changed, 35 insertions(+), 28 deletions(-)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d8f98b14e2fe..4b0b56b0501d 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -754,6 +754,38 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 
+static size_t iommu_pgsize(struct iommu_domain *domain,
+			   unsigned long addr_merge, size_t size)
+{
+	unsigned int pgsize_idx;
+	size_t pgsize;
+
+	/* Max page size that still fits into 'size' */
+	pgsize_idx = __fls(size);
+
+	/* need to consider alignment requirements ? */
+	if (likely(addr_merge)) {
+		/* Max page size allowed by address */
+		unsigned int align_pgsize_idx = __ffs(addr_merge);
+		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+	}
+
+	/* build a mask of acceptable page sizes */
+	pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+	/* throw away page sizes not supported by the hardware */
+	pgsize &= domain->ops->pgsize_bitmap;
+
+	/* make sure we're still sane */
+	BUG_ON(!pgsize);
+
+	/* pick the biggest page */
+	pgsize_idx = __fls(pgsize);
+	pgsize = 1UL << pgsize_idx;
+
+	return pgsize;
+}
+
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	      phys_addr_t paddr, size_t size, int prot)
 {
@@ -785,32 +817,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 		 (unsigned long)paddr, (unsigned long)size);
 
 	while (size) {
-		unsigned long pgsize, addr_merge = iova | paddr;
-		unsigned int pgsize_idx;
-
-		/* Max page size that still fits into 'size' */
-		pgsize_idx = __fls(size);
-
-		/* need to consider alignment requirements ? */
-		if (likely(addr_merge)) {
-			/* Max page size allowed by both iova and paddr */
-			unsigned int align_pgsize_idx = __ffs(addr_merge);
-
-			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
-		}
-
-		/* build a mask of acceptable page sizes */
-		pgsize = (1UL << (pgsize_idx + 1)) - 1;
-
-		/* throw away page sizes not supported by the hardware */
-		pgsize &= domain->ops->pgsize_bitmap;
-
-		/* make sure we're still sane */
-		BUG_ON(!pgsize);
-
-		/* pick the biggest page */
-		pgsize_idx = __fls(pgsize);
-		pgsize = 1UL << pgsize_idx;
+		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
 
 		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
 			 (unsigned long)paddr, pgsize);
@@ -863,9 +870,9 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 	 * or we hit an area that isn't mapped.
 	 */
 	while (unmapped < size) {
-		size_t left = size - unmapped;
+		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
 
-		unmapped_page = domain->ops->unmap(domain, iova, left);
+		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
 		if (!unmapped_page)
 			break;
 
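The commit message's point about unwinding is worth spelling out. iommu_map's
failure path (elsewhere in iommu.c, not shown in these hunks) unmaps whatever
it managed to map before the error, roughly as in this illustrative fragment,
where orig_iova and orig_size are hypothetical names for the saved original
arguments:

	/* unroll mapping in case something went wrong (sketch) */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);

Since 'size' counts down as chunks are mapped, the length orig_size - size can
be any sum of supported page sizes (0x201000, say), so without iommu_pgsize()
in the unmap path a driver like amd_iommu could BUG_ON during the unwind
itself.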