aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRobert Richter <rrichter@marvell.com>2019-03-20 14:57:23 -0400
committerJoerg Roedel <jroedel@suse.de>2019-03-22 07:01:58 -0400
commit80ef4464d5e27408685e609d389663aad46644b9 (patch)
treeca955b70bf998d4b527e0e1f2de41c9ce50029a7
parent4e50ce03976fbc8ae995a000c4b10c737467beaa (diff)
iommu/iova: Fix tracking of recently failed iova address
If a 32 bit allocation request is too big to possibly succeed, it early exits with a failure and then should never update max32_alloc_size. This patch fixes the current code so that the size is only updated if the slow path failed while walking the tree. Without the fix, the allocation may enter the slow path again even if there was a previous failure for a request of the same or a smaller size. Cc: <stable@vger.kernel.org> # 4.20+ Fixes: bee60e94a1e2 ("iommu/iova: Optimise attempts to allocate iova from 32bit address range") Reviewed-by: Robin Murphy <robin.murphy@arm.com> Signed-off-by: Robert Richter <rrichter@marvell.com> Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--drivers/iommu/iova.c5
1 file changed, 3 insertions, 2 deletions
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index f8d3ba247523..2de8122e218f 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -207,8 +207,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
207 curr_iova = rb_entry(curr, struct iova, node); 207 curr_iova = rb_entry(curr, struct iova, node);
208 } while (curr && new_pfn <= curr_iova->pfn_hi); 208 } while (curr && new_pfn <= curr_iova->pfn_hi);
209 209
210 if (limit_pfn < size || new_pfn < iovad->start_pfn) 210 if (limit_pfn < size || new_pfn < iovad->start_pfn) {
211 iovad->max32_alloc_size = size;
211 goto iova32_full; 212 goto iova32_full;
213 }
212 214
213 /* pfn_lo will point to size aligned address if size_aligned is set */ 215 /* pfn_lo will point to size aligned address if size_aligned is set */
214 new->pfn_lo = new_pfn; 216 new->pfn_lo = new_pfn;
@@ -222,7 +224,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
222 return 0; 224 return 0;
223 225
224iova32_full: 226iova32_full:
225 iovad->max32_alloc_size = size;
226 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 227 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
227 return -ENOMEM; 228 return -ENOMEM;
228} 229}