author		Joerg Roedel <joerg.roedel@amd.com>	2011-12-14 06:41:31 -0500
committer	Joerg Roedel <joerg.roedel@amd.com>	2011-12-14 06:52:09 -0500
commit		a06ec394c9318e2ee9209ca3c106d3fa6fbfeb00 (patch)
tree		f94f71b5a542b42ecea60e6f903e3f19e5b2eada /drivers/iommu
parent		175d6146738b3d04e1adcaa4a971a3b2b0dbd8af (diff)
parent		6c274d1cd5b3aa0834e9f0c3f58038f42278ff8c (diff)

Merge branch 'iommu/page-sizes' into x86/amd

Conflicts:
	drivers/iommu/amd_iommu.c
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/amd_iommu.c	|  32
-rw-r--r--	drivers/iommu/intel-iommu.c	|  30
-rw-r--r--	drivers/iommu/iommu.c		| 119
-rw-r--r--	drivers/iommu/msm_iommu.c	|  25
-rw-r--r--	drivers/iommu/omap-iommu.c	|  18
-rw-r--r--	drivers/iommu/omap-iovmm.c	|  17
6 files changed, 181 insertions(+), 60 deletions(-)
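
The common thread across these files: the iommu_ops ->map/->unmap callbacks (and the iommu_map()/iommu_unmap() wrappers) stop taking a 4KiB page order and take a size in bytes instead, drivers advertise a pgsize_bitmap, and ->unmap now returns how many bytes were actually unmapped. A caller-side sketch of the before/after (illustrative only, not taken from this diff; the 64KiB size is an arbitrary example):

        /* before this series: sizes expressed as 4KiB page orders */
        ret = iommu_map(domain, iova, paddr, get_order(SZ_64K), prot);

        /* after this series: sizes expressed in bytes, unmap returns bytes */
        ret = iommu_map(domain, iova, paddr, SZ_64K, prot);
        unmapped = iommu_unmap(domain, iova, SZ_64K);   /* size_t result */
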
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2a46b1d7a601..a7cbcd46af9e 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -44,6 +44,24 @@
 
 #define LOOP_TIMEOUT	100000
 
+/*
+ * This bitmap is used to advertise the page sizes our hardware support
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define AMD_IOMMU_PGSIZES	(~0xFFFUL)
+
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 
 /* A list of preallocated protection domains */
@@ -3093,9 +3111,8 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 }
 
 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
-			 phys_addr_t paddr, int gfp_order, int iommu_prot)
+			 phys_addr_t paddr, size_t page_size, int iommu_prot)
 {
-	unsigned long page_size = 0x1000UL << gfp_order;
 	struct protection_domain *domain = dom->priv;
 	int prot = 0;
 	int ret;
@@ -3115,24 +3132,22 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 	return ret;
 }
 
-static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
-			   int gfp_order)
+static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+			      size_t page_size)
 {
 	struct protection_domain *domain = dom->priv;
-	unsigned long page_size, unmap_size;
+	size_t unmap_size;
 
 	if (domain->mode == PAGE_MODE_NONE)
 		return -EINVAL;
 
-	page_size = 0x1000UL << gfp_order;
-
 	mutex_lock(&domain->api_lock);
 	unmap_size = iommu_unmap_page(domain, iova, page_size);
 	mutex_unlock(&domain->api_lock);
 
 	domain_flush_tlb_pde(domain);
 
-	return get_order(unmap_size);
+	return unmap_size;
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -3182,6 +3197,7 @@ static struct iommu_ops amd_iommu_ops = {
 	.unmap = amd_iommu_unmap,
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.domain_has_cap = amd_iommu_domain_has_cap,
+	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
 };
 
 /*****************************************************************************
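
Both x86 drivers advertise ~0xFFFUL, i.e. every power-of-two page size of 4KiB and larger, so the core keeps handing them the same naturally aligned, order-of-4KiB chunks as before. A minimal standalone sketch of what such a bitmap accepts (pgsize_supported() is a hypothetical helper written for this note, not part of the patch):

        #include <stdbool.h>

        #define AMD_IOMMU_PGSIZES	(~0xFFFUL)

        /* does the bitmap advertise this exact page size? */
        static bool pgsize_supported(unsigned long bitmap, unsigned long pgsize)
        {
                /* must be a power of two and have its bit set in the bitmap */
                return pgsize && !(pgsize & (pgsize - 1)) && (bitmap & pgsize);
        }

        /* pgsize_supported(AMD_IOMMU_PGSIZES, 0x1000)   -> true  (4KiB) */
        /* pgsize_supported(AMD_IOMMU_PGSIZES, 0x200000) -> true  (2MiB) */
        /* pgsize_supported(AMD_IOMMU_PGSIZES, 0x800)    -> false (2KiB) */
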
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a004c3945c67..c181883c2f9a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -78,6 +78,24 @@
 #define LEVEL_STRIDE		(9)
 #define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
 
+/*
+ * This bitmap is used to advertise the page sizes our hardware support
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
+
 static inline int agaw_to_level(int agaw)
 {
 	return agaw + 2;
@@ -3979,12 +3997,11 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
 
 static int intel_iommu_map(struct iommu_domain *domain,
 			   unsigned long iova, phys_addr_t hpa,
-			   int gfp_order, int iommu_prot)
+			   size_t size, int iommu_prot)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 	u64 max_addr;
 	int prot = 0;
-	size_t size;
 	int ret;
 
 	if (iommu_prot & IOMMU_READ)
@@ -3994,7 +4011,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
 		prot |= DMA_PTE_SNP;
 
-	size = PAGE_SIZE << gfp_order;
 	max_addr = iova + size;
 	if (dmar_domain->max_addr < max_addr) {
 		u64 end;
@@ -4017,11 +4033,10 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	return ret;
 }
 
-static int intel_iommu_unmap(struct iommu_domain *domain,
-			     unsigned long iova, int gfp_order)
+static size_t intel_iommu_unmap(struct iommu_domain *domain,
+				unsigned long iova, size_t size)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
-	size_t size = PAGE_SIZE << gfp_order;
 	int order;
 
 	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4030,7 +4045,7 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
 	if (dmar_domain->max_addr == iova + size)
 		dmar_domain->max_addr = iova;
 
-	return order;
+	return PAGE_SIZE << order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -4069,6 +4084,7 @@ static struct iommu_ops intel_iommu_ops = {
 	.unmap		= intel_iommu_unmap,
 	.iova_to_phys	= intel_iommu_iova_to_phys,
 	.domain_has_cap	= intel_iommu_domain_has_cap,
+	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
 };
 
 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2fb2963df553..84cdd8ac81f1 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -16,6 +16,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/bug.h>
@@ -157,32 +159,125 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
-	      phys_addr_t paddr, int gfp_order, int prot)
+	      phys_addr_t paddr, size_t size, int prot)
 {
-	size_t size;
+	unsigned long orig_iova = iova;
+	unsigned int min_pagesz;
+	size_t orig_size = size;
+	int ret = 0;
 
 	if (unlikely(domain->ops->map == NULL))
 		return -ENODEV;
 
-	size = PAGE_SIZE << gfp_order;
+	/* find out the minimum page size supported */
+	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+	/*
+	 * both the virtual address and the physical one, as well as
+	 * the size of the mapping, must be aligned (at least) to the
+	 * size of the smallest page supported by the hardware
+	 */
+	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
+			"0x%x\n", iova, (unsigned long)paddr,
+			(unsigned long)size, min_pagesz);
+		return -EINVAL;
+	}
+
+	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
+			(unsigned long)paddr, (unsigned long)size);
+
+	while (size) {
+		unsigned long pgsize, addr_merge = iova | paddr;
+		unsigned int pgsize_idx;
+
+		/* Max page size that still fits into 'size' */
+		pgsize_idx = __fls(size);
+
+		/* need to consider alignment requirements ? */
+		if (likely(addr_merge)) {
+			/* Max page size allowed by both iova and paddr */
+			unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+		}
+
+		/* build a mask of acceptable page sizes */
+		pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+		/* throw away page sizes not supported by the hardware */
+		pgsize &= domain->ops->pgsize_bitmap;
 
-	BUG_ON(!IS_ALIGNED(iova | paddr, size));
+		/* make sure we're still sane */
+		BUG_ON(!pgsize);
 
-	return domain->ops->map(domain, iova, paddr, gfp_order, prot);
+		/* pick the biggest page */
+		pgsize_idx = __fls(pgsize);
+		pgsize = 1UL << pgsize_idx;
+
+		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
+				(unsigned long)paddr, pgsize);
+
+		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+		if (ret)
+			break;
+
+		iova += pgsize;
+		paddr += pgsize;
+		size -= pgsize;
+	}
+
+	/* unroll mapping in case something went wrong */
+	if (ret)
+		iommu_unmap(domain, orig_iova, orig_size - size);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_map);
 
-int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
+size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 {
-	size_t size;
+	size_t unmapped_page, unmapped = 0;
+	unsigned int min_pagesz;
 
 	if (unlikely(domain->ops->unmap == NULL))
 		return -ENODEV;
 
-	size = PAGE_SIZE << gfp_order;
-
-	BUG_ON(!IS_ALIGNED(iova, size));
-
-	return domain->ops->unmap(domain, iova, gfp_order);
+	/* find out the minimum page size supported */
+	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+	/*
+	 * The virtual address, as well as the size of the mapping, must be
+	 * aligned (at least) to the size of the smallest page supported
+	 * by the hardware
+	 */
+	if (!IS_ALIGNED(iova | size, min_pagesz)) {
+		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
+			iova, (unsigned long)size, min_pagesz);
+		return -EINVAL;
+	}
+
+	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
+			(unsigned long)size);
+
+	/*
+	 * Keep iterating until we either unmap 'size' bytes (or more)
+	 * or we hit an area that isn't mapped.
+	 */
+	while (unmapped < size) {
+		size_t left = size - unmapped;
+
+		unmapped_page = domain->ops->unmap(domain, iova, left);
+		if (!unmapped_page)
+			break;
+
+		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
+				(unsigned long)unmapped_page);
+
+		iova += unmapped_page;
+		unmapped += unmapped_page;
+	}
+
+	return unmapped;
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
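
To see what the new iommu_map() splitting loop does with a concrete request, here is a small userspace sketch of the page-size selection (editorial, not part of the patch; pick_pgsize() and main() are invented for this note, __fls/__ffs are approximated with GCC builtins, and a 64-bit unsigned long is assumed):

        #include <stdio.h>

        #define SZ_4K  0x1000UL
        #define SZ_64K 0x10000UL
        #define SZ_1M  0x100000UL
        #define SZ_16M 0x1000000UL

        /* mirror of the loop body in iommu_map(): pick the biggest page that
         * fits 'size', respects the alignment of iova|paddr, and is in the
         * driver's pgsize_bitmap (all arguments assumed non-zero) */
        static unsigned long pick_pgsize(unsigned long bitmap, unsigned long iova,
                                         unsigned long paddr, unsigned long size)
        {
                unsigned long addr_merge = iova | paddr;
                unsigned int pgsize_idx = 63 - __builtin_clzl(size);  /* __fls */
                unsigned long pgsize;

                if (addr_merge) {
                        unsigned int align_idx = __builtin_ctzl(addr_merge); /* __ffs */
                        if (align_idx < pgsize_idx)
                                pgsize_idx = align_idx;
                }

                pgsize = (1UL << (pgsize_idx + 1)) - 1;   /* sizes that fit */
                pgsize &= bitmap;                         /* sizes the hw supports */
                return 1UL << (63 - __builtin_clzl(pgsize)); /* biggest one */
        }

        int main(void)
        {
                unsigned long bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M;

                /* 64KiB-aligned iova/paddr, 68KiB left: a 64KiB page is chosen */
                printf("0x%lx\n", pick_pgsize(bitmap, 0x10000, 0x40000, 0x11000));
                /* iova only 4KiB-aligned: falls back to a 4KiB page */
                printf("0x%lx\n", pick_pgsize(bitmap, 0x11000, 0x40000, 0x11000));
                return 0;
        }

The first call prints 0x10000 and the second 0x1000; the real loop then advances iova, paddr and size by the chosen page size and repeats until the whole region is mapped.
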
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 5865dd2e28f9..08a90b88e40d 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -42,6 +42,9 @@ __asm__ __volatile__ ( \
 #define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
 #define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)
 
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
 static int msm_iommu_tex_class[4];
 
 DEFINE_SPINLOCK(msm_iommu_lock);
@@ -352,7 +355,7 @@ fail:
 }
 
 static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
-			 phys_addr_t pa, int order, int prot)
+			 phys_addr_t pa, size_t len, int prot)
 {
 	struct msm_priv *priv;
 	unsigned long flags;
@@ -363,7 +366,6 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
 	unsigned long *sl_pte;
 	unsigned long sl_offset;
 	unsigned int pgprot;
-	size_t len = 0x1000UL << order;
 	int ret = 0, tex, sh;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -463,8 +465,8 @@ fail:
 	return ret;
 }
 
-static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
-			    int order)
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+			    size_t len)
 {
 	struct msm_priv *priv;
 	unsigned long flags;
@@ -474,7 +476,6 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 	unsigned long *sl_table;
 	unsigned long *sl_pte;
 	unsigned long sl_offset;
-	size_t len = 0x1000UL << order;
 	int i, ret = 0;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -544,15 +545,12 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 
 	ret = __flush_iotlb(domain);
 
-	/*
-	 * the IOMMU API requires us to return the order of the unmapped
-	 * page (on success).
-	 */
-	if (!ret)
-		ret = order;
 fail:
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
-	return ret;
+
+	/* the IOMMU API requires us to return how many bytes were unmapped */
+	len = ret ? 0 : len;
+	return len;
 }
 
 static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -684,7 +682,8 @@ static struct iommu_ops msm_iommu_ops = {
 	.map = msm_iommu_map,
 	.unmap = msm_iommu_unmap,
 	.iova_to_phys = msm_iommu_iova_to_phys,
-	.domain_has_cap = msm_iommu_domain_has_cap
+	.domain_has_cap = msm_iommu_domain_has_cap,
+	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
 };
 
 static int __init get_tex_class(int icp, int ocp, int mt, int nos)
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 8f32b2bf7587..08cf7ec5b4a5 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -33,6 +33,9 @@
 	    (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
 	    __i++)
 
+/* bitmap of the page sizes currently supported */
+#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
 /**
  * struct omap_iommu_domain - omap iommu domain
  * @pgtable:	the page table
@@ -1019,12 +1022,11 @@ static void iopte_cachep_ctor(void *iopte)
 }
 
 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
-			 phys_addr_t pa, int order, int prot)
+			 phys_addr_t pa, size_t bytes, int prot)
 {
 	struct omap_iommu_domain *omap_domain = domain->priv;
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
 	struct device *dev = oiommu->dev;
-	size_t bytes = PAGE_SIZE << order;
 	struct iotlb_entry e;
 	int omap_pgsz;
 	u32 ret, flags;
@@ -1049,19 +1051,16 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
 	return ret;
 }
 
-static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
-			    int order)
+static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
+			       size_t size)
 {
 	struct omap_iommu_domain *omap_domain = domain->priv;
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
 	struct device *dev = oiommu->dev;
-	size_t unmap_size;
-
-	dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);
 
-	unmap_size = iopgtable_clear_entry(oiommu, da);
+	dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);
 
-	return unmap_size ? get_order(unmap_size) : -EINVAL;
+	return iopgtable_clear_entry(oiommu, da);
 }
 
 static int
@@ -1211,6 +1210,7 @@ static struct iommu_ops omap_iommu_ops = {
 	.unmap		= omap_iommu_unmap,
 	.iova_to_phys	= omap_iommu_iova_to_phys,
 	.domain_has_cap	= omap_iommu_domain_has_cap,
+	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
 };
 
 static int __init omap_iommu_init(void)
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index 46be456fcc00..6edc4ceba197 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -410,7 +410,6 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 	unsigned int i, j;
 	struct scatterlist *sg;
 	u32 da = new->da_start;
-	int order;
 
 	if (!domain || !sgt)
 		return -EINVAL;
@@ -429,12 +428,10 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 		if (bytes_to_iopgsz(bytes) < 0)
 			goto err_out;
 
-		order = get_order(bytes);
-
 		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
 			 i, da, pa, bytes);
 
-		err = iommu_map(domain, da, pa, order, flags);
+		err = iommu_map(domain, da, pa, bytes, flags);
 		if (err)
 			goto err_out;
 
@@ -449,10 +446,9 @@ err_out:
 		size_t bytes;
 
 		bytes = sg->length + sg->offset;
-		order = get_order(bytes);
 
 		/* ignore failures.. we're already handling one */
-		iommu_unmap(domain, da, order);
+		iommu_unmap(domain, da, bytes);
 
 		da += bytes;
 	}
@@ -467,7 +463,8 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
 	size_t total = area->da_end - area->da_start;
 	const struct sg_table *sgt = area->sgt;
 	struct scatterlist *sg;
-	int i, err;
+	int i;
+	size_t unmapped;
 
 	BUG_ON(!sgtable_ok(sgt));
 	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
@@ -475,13 +472,11 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
 	start = area->da_start;
 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 		size_t bytes;
-		int order;
 
 		bytes = sg->length + sg->offset;
-		order = get_order(bytes);
 
-		err = iommu_unmap(domain, start, order);
-		if (err < 0)
+		unmapped = iommu_unmap(domain, start, bytes);
+		if (unmapped < bytes)
 			break;
 
 		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
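
unmap_iovm_area() above shows the new calling convention on the unmap side: iommu_unmap() takes and returns byte counts, and a short return signals a partial unmap. A hedged sketch of the same check in a generic caller (dev, domain, iova and bytes are assumed locals, not names from this diff):

        size_t unmapped;

        unmapped = iommu_unmap(domain, iova, bytes);
        if (unmapped < bytes)
                /* only part of the range was mapped; stop walking it */
                dev_warn(dev, "partial unmap: asked 0x%zx, got 0x%zx\n",
                         bytes, unmapped);
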