author     Jan Beulich <jbeulich@novell.com>  2008-03-13 05:13:30 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-03-13 16:15:52 -0400
commit     b15a3891c916f32a29832886a053a48be2741d4d (patch)
tree       59971b1f3b1961b7c13a170b1a3617431db03ac5
parent     96e31022a1b6e7cb173cbb3dce1fde7ba548860a (diff)
avoid endless loops in lib/swiotlb.c
Commit 681cc5cd3efbeafca6386114070e0bfb5012e249 ("iommu sg merging:
swiotlb: respect the segment boundary limits") introduced two
possibilities for entering an endless loop in lib/swiotlb.c:

- if max_slots is zero (possible if mask is ~0UL)
- if the number of slots requested fits into a swiotlb segment, but
  is too large for the part of a segment which remains after
  considering offset_slots

This fixes them.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
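For context, the check that can spin forever here is is_span_boundary(),
added by the commit cited above. A minimal sketch of that helper as it
looked at the time, reconstructed for illustration (not part of this patch):

static inline int is_span_boundary(unsigned int index, unsigned int nslots,
				   unsigned int offset_slots,
				   unsigned long max_slots)
{
	/* offset of this slot within its dma_get_seg_boundary() segment */
	unsigned long offset = (offset_slots + index) & (max_slots - 1);

	/* nonzero if nslots starting here would cross the segment boundary */
	return offset + nslots > max_slots;
}

With max_slots == 0, max_slots - 1 is ~0UL, so offset reduces to
offset_slots + index and offset + nslots > 0 is true for every candidate
slot; a loop that only terminates when this check fails can therefore
never exit. Likewise, if every index reachable by the search stride
leaves fewer than nslots slots before the segment boundary, the check
never fails either. The patch below handles both cases by moving the
boundary skip inside the do/while and bailing out through the new
not_found label once the search wraps back to its starting index.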
-rw-r--r--  lib/swiotlb.c | 30 ++++++++++++++++--------------
1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 4bb5a11e18a2..025922807e6e 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -310,7 +310,9 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	start_dma_addr = virt_to_bus(io_tlb_start) & mask;
 
 	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-	max_slots = ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	max_slots = mask + 1
+		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
+		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
 
 	/*
 	 * For mappings greater than a page, we limit the stride (and
@@ -333,16 +335,18 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 		index = ALIGN(io_tlb_index, stride);
 		if (index >= io_tlb_nslabs)
 			index = 0;
-
-		while (is_span_boundary(index, nslots, offset_slots,
-					max_slots)) {
-			index += stride;
-			if (index >= io_tlb_nslabs)
-				index = 0;
-		}
 		wrap = index;
 
 		do {
+			while (is_span_boundary(index, nslots, offset_slots,
+						max_slots)) {
+				index += stride;
+				if (index >= io_tlb_nslabs)
+					index = 0;
+				if (index == wrap)
+					goto not_found;
+			}
+
 			/*
 			 * If we find a slot that indicates we have 'nslots'
 			 * number of contiguous buffers, we allocate the
@@ -367,14 +371,12 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 
 			goto found;
 		}
-		do {
-			index += stride;
-			if (index >= io_tlb_nslabs)
-				index = 0;
-		} while (is_span_boundary(index, nslots, offset_slots,
-					  max_slots));
+		index += stride;
+		if (index >= io_tlb_nslabs)
+			index = 0;
 	} while (index != wrap);
 
+not_found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 	return NULL;
 }
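To see what the new max_slots expression computes, here is a small
standalone userspace sketch; the ALIGN macro and the IO_TLB_SHIFT value
of 11 mirror lib/swiotlb.c, but the program itself is illustrative only:

#include <stdio.h>

#define IO_TLB_SHIFT	11	/* 2 KB swiotlb slabs, as in lib/swiotlb.c */
#define BITS_PER_LONG	((int)(8 * sizeof(long)))
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	/* a 24-bit DMA segment mask, and the "no boundary" mask ~0UL */
	unsigned long masks[] = { 0x00ffffffUL, ~0UL };

	for (int i = 0; i < 2; i++) {
		unsigned long mask = masks[i];
		/*
		 * The patched expression: when mask == ~0UL, mask + 1
		 * wraps to 0, so fall back to the widest possible
		 * segment instead of computing max_slots == 0.
		 */
		unsigned long max_slots = mask + 1
			? ALIGN(mask + 1, 1UL << IO_TLB_SHIFT) >> IO_TLB_SHIFT
			: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

		printf("mask=%#lx -> max_slots=%lu\n", mask, max_slots);
	}
	return 0;
}

On a 64-bit build this prints max_slots=8192 for the 24-bit mask and
2^53 for ~0UL; the pre-patch expression would have yielded
max_slots == 0 for ~0UL, the first endless-loop trigger described in
the commit message.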