about summary refs log tree commit diff stats
path: root/drivers/iommu
diff options
context:
space:
mode:
authorRobin Murphy <robin.murphy@arm.com>2015-07-16 14:40:12 -0400
committerDavid Woodhouse <David.Woodhouse@intel.com>2015-07-28 10:47:56 -0400
commit8f6429c7cb59f28433253575cc8e3262eed63592 (patch)
treeb0ba92c894735701a01d289f92c42c221c6d3cf1 /drivers/iommu
parent52721d9d3334c1cb1f76219a161084094ec634dc (diff)
iommu/iova: Avoid over-allocating when size-aligned
Currently, allocating a size-aligned IOVA region quietly adjusts the actual allocation size in the process, returning a rounded-up power-of-two-sized allocation. This results in mismatched behaviour in the IOMMU driver if the original size was not a power of two, where the original size is mapped, but the rounded-up IOVA size is unmapped. Whilst some IOMMUs will happily unmap already-unmapped pages, others consider this an error, so fix it by computing the necessary alignment padding without altering the actual allocation size. Also clean up by making pad_size unsigned, since its callers always pass unsigned values and negative padding makes little sense here anyway. Signed-off-by: Robin Murphy <robin.murphy@arm.com> Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/iommu')
-rw-r--r--drivers/iommu/intel-iommu.c2
-rw-r--r--drivers/iommu/iova.c23
2 files changed, 8 insertions, 17 deletions
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a98a7b27aca1..92101597cede 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3233,6 +3233,8 @@ static struct iova *intel_alloc_iova(struct device *dev,

 	/* Restrict dma_mask to the width that the iommu can handle */
 	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+	/* Ensure we reserve the whole size-aligned region */
+	nrpages = __roundup_pow_of_two(nrpages);

 	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
 		/*
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b7c3d923f3e1..29f2efcf668e 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -120,19 +120,14 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 	}
 }

-/* Computes the padding size required, to make the
- * the start address naturally aligned on its size
+/*
+ * Computes the padding size required, to make the start address
+ * naturally aligned on the power-of-two order of its size
  */
-static int
-iova_get_pad_size(int size, unsigned int limit_pfn)
+static unsigned int
+iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
 {
-	unsigned int pad_size = 0;
-	unsigned int order = ilog2(size);
-
-	if (order)
-		pad_size = (limit_pfn + 1) % (1 << order);
-
-	return pad_size;
+	return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
 }

 static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
@@ -265,12 +260,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	if (!new_iova)
 		return NULL;

-	/* If size aligned is set then round the size to
-	 * to next power of two.
-	 */
-	if (size_aligned)
-		size = __roundup_pow_of_two(size);
-
 	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
 					    new_iova, size_aligned);
