-rw-r--r--  lib/swiotlb.c  |  89
1 file changed, 43 insertions(+), 46 deletions(-)
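The hunk below removes a superfluous brace block in map_single() and reindents the slot-search loop: starting from io_tlb_index, the code walks the io_tlb_list[] slot counters with a given stride, skips candidate positions whose 'nslots' span would cross a boundary (is_span_boundary()), and claims the first run of free slots that is long enough. For orientation only, here is a standalone C sketch that mirrors that search strategy; NSLABS, SEGSIZE, spans_boundary() and alloc_slots() are simplified, illustrative stand-ins, and the boundary test is deliberately reduced to a SEGSIZE check -- this is not the kernel implementation.

/* Illustrative sketch only -- simplified model of the slot search in
 * map_single(); names and the boundary rule are not the kernel's. */
#include <stdio.h>

#define NSLABS  128	/* slots in the (assumed) pool */
#define SEGSIZE   8	/* max contiguous run, like IO_TLB_SEGSIZE */

static unsigned int list[NSLABS];	/* list[i] = free slots starting at i */

/* Simplified stand-in for is_span_boundary(): reject a placement whose
 * 'nslots' span would cross a SEGSIZE-aligned boundary. */
static int spans_boundary(unsigned int index, unsigned int nslots)
{
	return index / SEGSIZE != (index + nslots - 1) / SEGSIZE;
}

/* Circular search with 'stride', as in the hunk: returns the first index
 * of a free run of 'nslots', or -1 if the whole pool was scanned. */
static int alloc_slots(unsigned int start, unsigned int stride,
		       unsigned int nslots)
{
	unsigned int index = start % NSLABS;
	unsigned int wrap = index;

	do {
		/* Skip positions whose span would cross a boundary. */
		while (spans_boundary(index, nslots)) {
			index = (index + stride) % NSLABS;
			if (index == wrap)
				return -1;	/* wrapped around: not found */
		}

		if (list[index] >= nslots) {
			int i, count = 0;

			/* Claim the run: mark its slots unavailable ... */
			for (i = index; i < (int)(index + nslots); i++)
				list[i] = 0;
			/* ... and renumber the free slots just before it so
			 * their counters reflect the shorter remaining run,
			 * mirroring the io_tlb_list[] bookkeeping. */
			for (i = (int)index - 1;
			     i >= 0 && i % SEGSIZE != SEGSIZE - 1 && list[i];
			     i--)
				list[i] = ++count;
			return (int)index;
		}
		index = (index + stride) % NSLABS;
	} while (index != wrap);

	return -1;
}

int main(void)
{
	unsigned int i;

	/* Each slot can start a run that reaches the next SEGSIZE boundary. */
	for (i = 0; i < NSLABS; i++)
		list[i] = SEGSIZE - (i % SEGSIZE);

	printf("first:  %d\n", alloc_slots(0, 1, 4));	/* expect 0 */
	printf("second: %d\n", alloc_slots(0, 1, 4));	/* expect 4 */
	return 0;
}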
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 025922807e6e..256c8445e54d 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -331,56 +331,53 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	 * request and allocate a buffer from that IO TLB pool.
 	 */
 	spin_lock_irqsave(&io_tlb_lock, flags);
-	{
-		index = ALIGN(io_tlb_index, stride);
-		if (index >= io_tlb_nslabs)
-			index = 0;
-		wrap = index;
-
-		do {
-			while (is_span_boundary(index, nslots, offset_slots,
-						max_slots)) {
-				index += stride;
-				if (index >= io_tlb_nslabs)
-					index = 0;
-				if (index == wrap)
-					goto not_found;
-			}
-
-			/*
-			 * If we find a slot that indicates we have 'nslots'
-			 * number of contiguous buffers, we allocate the
-			 * buffers from that slot and mark the entries as '0'
-			 * indicating unavailable.
-			 */
-			if (io_tlb_list[index] >= nslots) {
-				int count = 0;
-
-				for (i = index; i < (int) (index + nslots); i++)
-					io_tlb_list[i] = 0;
-				for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
-					io_tlb_list[i] = ++count;
-				dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
-
-				/*
-				 * Update the indices to avoid searching in
-				 * the next round.
-				 */
-				io_tlb_index = ((index + nslots) < io_tlb_nslabs
-						? (index + nslots) : 0);
-
-				goto found;
-			}
+	index = ALIGN(io_tlb_index, stride);
+	if (index >= io_tlb_nslabs)
+		index = 0;
+	wrap = index;
+
+	do {
+		while (is_span_boundary(index, nslots, offset_slots,
+					max_slots)) {
 			index += stride;
 			if (index >= io_tlb_nslabs)
 				index = 0;
-		} while (index != wrap);
+			if (index == wrap)
+				goto not_found;
+		}
 
-	  not_found:
-		spin_unlock_irqrestore(&io_tlb_lock, flags);
-		return NULL;
-	}
-  found:
+		/*
+		 * If we find a slot that indicates we have 'nslots' number of
+		 * contiguous buffers, we allocate the buffers from that slot
+		 * and mark the entries as '0' indicating unavailable.
+		 */
+		if (io_tlb_list[index] >= nslots) {
+			int count = 0;
+
+			for (i = index; i < (int) (index + nslots); i++)
+				io_tlb_list[i] = 0;
+			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
+				io_tlb_list[i] = ++count;
+			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
+
+			/*
+			 * Update the indices to avoid searching in the next
+			 * round.
+			 */
+			io_tlb_index = ((index + nslots) < io_tlb_nslabs
+					? (index + nslots) : 0);
+
+			goto found;
+		}
+		index += stride;
+		if (index >= io_tlb_nslabs)
+			index = 0;
+	} while (index != wrap);
+
+not_found:
+	spin_unlock_irqrestore(&io_tlb_lock, flags);
+	return NULL;
+found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 
 	/*