author	Ohad Ben-Cohen <ohad@wizery.com>	2011-11-10 04:32:25 -0500
committer	Joerg Roedel <joerg.roedel@amd.com>	2011-11-10 05:40:37 -0500
commit	5009065d38c95455bd2d27c2838313e3dd0c5bc7 (patch)
tree	a957af07e95166bcc014b3f333234fe74cef790e /drivers/iommu
parent	1ea6b8f48918282bdca0b32a34095504ee65bab5 (diff)
iommu/core: stop converting bytes to page order back and forth
Express sizes in bytes rather than in page order, to eliminate the
size->order->size conversions we have whenever the IOMMU API is
calling the low level drivers' map/unmap methods.

Adopt all existing drivers.

Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Cc: David Brown <davidb@codeaurora.org>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <Joerg.Roedel@amd.com>
Cc: Stepan Moskovchenko <stepanm@codeaurora.org>
Cc: KyongHo Cho <pullip.cho@samsung.com>
Cc: Hiroshi DOYU <hdoyu@nvidia.com>
Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
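To make the round trip concrete, here is a minimal standalone sketch (illustrative only, not part of the patch) of the two conversions this patch eliminates: every driver shifted the caller's page order into a byte count on entry, and unmap paths converted the byte count back into an order on return. bytes_to_order() below is a simplified stand-in for the kernel's get_order():

	#include <stddef.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	/* order -> bytes, as in the removed "0x1000UL << gfp_order" lines;
	 * e.g. order 2 -> 16384 bytes */
	static size_t order_to_bytes(int order)
	{
		return PAGE_SIZE << order;
	}

	/* bytes -> order, rounding up; a simplified stand-in for the
	 * kernel's get_order() */
	static int bytes_to_order(size_t bytes)
	{
		int order = 0;

		while ((PAGE_SIZE << order) < bytes)
			order++;
		return order;
	}

With sizes expressed in bytes end to end, each driver drops both conversions, as the hunks below show.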
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/amd_iommu.c	13
-rw-r--r--	drivers/iommu/intel-iommu.c	11
-rw-r--r--	drivers/iommu/iommu.c	8
-rw-r--r--	drivers/iommu/msm_iommu.c	19
-rw-r--r--	drivers/iommu/omap-iommu.c	14
5 files changed, 26 insertions(+), 39 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 4ee277a8521a..a3b7072e86e2 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2702,9 +2702,8 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 }
 
 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
-			 phys_addr_t paddr, int gfp_order, int iommu_prot)
+			 phys_addr_t paddr, size_t page_size, int iommu_prot)
 {
-	unsigned long page_size = 0x1000UL << gfp_order;
 	struct protection_domain *domain = dom->priv;
 	int prot = 0;
 	int ret;
@@ -2721,13 +2720,11 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 	return ret;
 }
 
-static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
-			   int gfp_order)
+static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+			      size_t page_size)
 {
 	struct protection_domain *domain = dom->priv;
-	unsigned long page_size, unmap_size;
-
-	page_size = 0x1000UL << gfp_order;
+	size_t unmap_size;
 
 	mutex_lock(&domain->api_lock);
 	unmap_size = iommu_unmap_page(domain, iova, page_size);
@@ -2735,7 +2732,7 @@ static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 
 	domain_flush_tlb_pde(domain);
 
-	return get_order(unmap_size);
+	return unmap_size;
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c0c7820d4c46..2a165010a1c1 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3979,12 +3979,11 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
 
 static int intel_iommu_map(struct iommu_domain *domain,
 			   unsigned long iova, phys_addr_t hpa,
-			   int gfp_order, int iommu_prot)
+			   size_t size, int iommu_prot)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 	u64 max_addr;
 	int prot = 0;
-	size_t size;
 	int ret;
 
 	if (iommu_prot & IOMMU_READ)
@@ -3994,7 +3993,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
 		prot |= DMA_PTE_SNP;
 
-	size = PAGE_SIZE << gfp_order;
 	max_addr = iova + size;
 	if (dmar_domain->max_addr < max_addr) {
 		u64 end;
@@ -4017,11 +4015,10 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	return ret;
 }
 
-static int intel_iommu_unmap(struct iommu_domain *domain,
-			     unsigned long iova, int gfp_order)
+static size_t intel_iommu_unmap(struct iommu_domain *domain,
+				unsigned long iova, size_t size)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
-	size_t size = PAGE_SIZE << gfp_order;
 	int order;
 
 	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4030,7 +4027,7 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
 	if (dmar_domain->max_addr == iova + size)
 		dmar_domain->max_addr = iova;
 
-	return order;
+	return PAGE_SIZE << order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2fb2963df553..7a2953d8f12e 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -168,13 +168,13 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 
 	BUG_ON(!IS_ALIGNED(iova | paddr, size));
 
-	return domain->ops->map(domain, iova, paddr, gfp_order, prot);
+	return domain->ops->map(domain, iova, paddr, size, prot);
 }
 EXPORT_SYMBOL_GPL(iommu_map);
 
 int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
 {
-	size_t size;
+	size_t size, unmapped;
 
 	if (unlikely(domain->ops->unmap == NULL))
 		return -ENODEV;
@@ -183,6 +183,8 @@ int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
 
 	BUG_ON(!IS_ALIGNED(iova, size));
 
-	return domain->ops->unmap(domain, iova, gfp_order);
+	unmapped = domain->ops->unmap(domain, iova, size);
+
+	return get_order(unmapped);
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
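Note that iommu_unmap() itself still takes and returns a page order at this point, so the order/byte conversion now happens exactly once, at the API boundary, instead of once per driver. A mock of the resulting flow (hypothetical names, reusing the helpers from the sketch above):

	/* drivers' unmap callbacks now work purely in bytes */
	static size_t mock_driver_unmap(unsigned long iova, size_t size)
	{
		return size;	/* pretend the whole region was unmapped */
	}

	static int mock_iommu_unmap(unsigned long iova, int gfp_order)
	{
		size_t size = order_to_bytes(gfp_order);	/* once, on entry */
		size_t unmapped = mock_driver_unmap(iova, size);

		return bytes_to_order(unmapped);		/* once, on return */
	}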
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 5865dd2e28f9..13718d958da8 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -352,7 +352,7 @@ fail:
 }
 
 static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
-			 phys_addr_t pa, int order, int prot)
+			 phys_addr_t pa, size_t len, int prot)
 {
 	struct msm_priv *priv;
 	unsigned long flags;
@@ -363,7 +363,6 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
 	unsigned long *sl_pte;
 	unsigned long sl_offset;
 	unsigned int pgprot;
-	size_t len = 0x1000UL << order;
 	int ret = 0, tex, sh;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -463,8 +462,8 @@ fail:
 	return ret;
 }
 
-static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
-			    int order)
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+			    size_t len)
 {
 	struct msm_priv *priv;
 	unsigned long flags;
@@ -474,7 +473,6 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 	unsigned long *sl_table;
 	unsigned long *sl_pte;
 	unsigned long sl_offset;
-	size_t len = 0x1000UL << order;
 	int i, ret = 0;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -544,15 +542,12 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 
 	ret = __flush_iotlb(domain);
 
-	/*
-	 * the IOMMU API requires us to return the order of the unmapped
-	 * page (on success).
-	 */
-	if (!ret)
-		ret = order;
 fail:
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
-	return ret;
+
+	/* the IOMMU API requires us to return how many bytes were unmapped */
+	len = ret ? 0 : len;
+	return len;
 }
 
 static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 8f32b2bf7587..ad80b1d0d099 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1019,12 +1019,11 @@ static void iopte_cachep_ctor(void *iopte)
 }
 
 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
-			  phys_addr_t pa, int order, int prot)
+			  phys_addr_t pa, size_t bytes, int prot)
 {
 	struct omap_iommu_domain *omap_domain = domain->priv;
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
 	struct device *dev = oiommu->dev;
-	size_t bytes = PAGE_SIZE << order;
 	struct iotlb_entry e;
 	int omap_pgsz;
 	u32 ret, flags;
@@ -1049,19 +1048,16 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
 	return ret;
 }
 
-static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
-			    int order)
+static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
+			       size_t size)
 {
 	struct omap_iommu_domain *omap_domain = domain->priv;
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
 	struct device *dev = oiommu->dev;
-	size_t unmap_size;
 
-	dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);
+	dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);
 
-	unmap_size = iopgtable_clear_entry(oiommu, da);
-
-	return unmap_size ? get_order(unmap_size) : -EINVAL;
+	return iopgtable_clear_entry(oiommu, da);
 }
 
 static int
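Taken together, the driver-facing contract after this patch looks like the following hypothetical skeleton (the example_* names are made up; only the signatures reflect the new iommu_ops convention):

	/* map and unmap both speak bytes now; unmap returns the number of
	 * bytes actually unmapped, or 0 on failure, as the msm driver does */
	static int example_iommu_map(struct iommu_domain *domain, unsigned long iova,
				     phys_addr_t paddr, size_t size, int prot)
	{
		/* ... install mappings covering [iova, iova + size) ... */
		return 0;
	}

	static size_t example_iommu_unmap(struct iommu_domain *domain,
					  unsigned long iova, size_t size)
	{
		/* ... tear the mappings down ... */
		return size;
	}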