path: root/lib/swiotlb.c
author     Andrew Morton <akpm@linux-foundation.org>        2008-04-29 03:59:36 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-04-29 11:06:05 -0400
commit     a7133a15587b8921af8b074e0d3fe1606cbe5597 (patch)
tree       129706734260468ac6aac966584ffeca31adf471 /lib/swiotlb.c
parent     9a3be324e3d9da08219d81d6765f445a726cf962 (diff)
lib/swiotlb.c: cleanups
There's a pointlessly braced block of code in there.  Remove the braces and
save a tabstop.

Cc: Andi Kleen <ak@suse.de>
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jan Beulich <jbeulich@novell.com>
Cc: Tony Luck <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
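The change is purely cosmetic: a pair of braces that introduces no scope anyone
uses is dropped, and the body it enclosed moves one tab stop to the left.  As a
stand-alone illustration of the pattern being cleaned up (a minimal sketch with
made-up names, not swiotlb code):

#include <stdio.h>

static int counter;

/* Before the cleanup: the inner braces add nothing but a level of indentation. */
static void bump_braced(void)
{
	{
		counter++;
		printf("counter = %d\n", counter);
	}
}

/* After the cleanup: identical behaviour, one tab stop shallower. */
static void bump_plain(void)
{
	counter++;
	printf("counter = %d\n", counter);
}

int main(void)
{
	bump_braced();
	bump_plain();
	return 0;
}

In the patch below the same transformation is applied to the slot-search loop in
map_single(), which is why nearly every line of the hunk changes while the logic
stays identical.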
Diffstat (limited to 'lib/swiotlb.c')
-rw-r--r--  lib/swiotlb.c | 89
1 file changed, 43 insertions(+), 46 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 025922807e6e..256c8445e54d 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -331,56 +331,53 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	 * request and allocate a buffer from that IO TLB pool.
 	 */
 	spin_lock_irqsave(&io_tlb_lock, flags);
-	{
-		index = ALIGN(io_tlb_index, stride);
-		if (index >= io_tlb_nslabs)
-			index = 0;
-		wrap = index;
-
-		do {
-			while (is_span_boundary(index, nslots, offset_slots,
-						max_slots)) {
-				index += stride;
-				if (index >= io_tlb_nslabs)
-					index = 0;
-				if (index == wrap)
-					goto not_found;
-			}
-
-			/*
-			 * If we find a slot that indicates we have 'nslots'
-			 * number of contiguous buffers, we allocate the
-			 * buffers from that slot and mark the entries as '0'
-			 * indicating unavailable.
-			 */
-			if (io_tlb_list[index] >= nslots) {
-				int count = 0;
-
-				for (i = index; i < (int) (index + nslots); i++)
-					io_tlb_list[i] = 0;
-				for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
-					io_tlb_list[i] = ++count;
-				dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
-
-				/*
-				 * Update the indices to avoid searching in
-				 * the next round.
-				 */
-				io_tlb_index = ((index + nslots) < io_tlb_nslabs
-						? (index + nslots) : 0);
-
-				goto found;
-			}
-			index += stride;
-			if (index >= io_tlb_nslabs)
-				index = 0;
-		} while (index != wrap);
-
-not_found:
-		spin_unlock_irqrestore(&io_tlb_lock, flags);
-		return NULL;
-	}
-found:
+	index = ALIGN(io_tlb_index, stride);
+	if (index >= io_tlb_nslabs)
+		index = 0;
+	wrap = index;
+
+	do {
+		while (is_span_boundary(index, nslots, offset_slots,
+					max_slots)) {
+			index += stride;
+			if (index >= io_tlb_nslabs)
+				index = 0;
+			if (index == wrap)
+				goto not_found;
+		}
+
+		/*
+		 * If we find a slot that indicates we have 'nslots' number of
+		 * contiguous buffers, we allocate the buffers from that slot
+		 * and mark the entries as '0' indicating unavailable.
+		 */
+		if (io_tlb_list[index] >= nslots) {
+			int count = 0;
+
+			for (i = index; i < (int) (index + nslots); i++)
+				io_tlb_list[i] = 0;
+			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
+				io_tlb_list[i] = ++count;
+			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
+
+			/*
+			 * Update the indices to avoid searching in the next
+			 * round.
+			 */
+			io_tlb_index = ((index + nslots) < io_tlb_nslabs
+					? (index + nslots) : 0);
+
+			goto found;
+		}
+		index += stride;
+		if (index >= io_tlb_nslabs)
+			index = 0;
+	} while (index != wrap);
+
+not_found:
+	spin_unlock_irqrestore(&io_tlb_lock, flags);
+	return NULL;
+found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 
 	/*
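For readers who want to see the search logic from the hunk above in isolation,
here is a stand-alone sketch of the wrap-around, first-fit slot search that
map_single() performs.  Everything in it (POOL_NSLABS, pool_list, pool_index,
alloc_slots) is a hypothetical stand-in, not the kernel's io_tlb_* code; the
real function also respects IO_TLB_SEGSIZE segment boundaries, rejects runs
that cross a span boundary via is_span_boundary(), computes the bounce-buffer
DMA address, and runs under io_tlb_lock, all of which are left out here.

#include <stdio.h>

#define POOL_NSLABS 128	/* hypothetical pool size, one segment only */

/* pool_list[i]: number of contiguous free slots starting at i; 0 = in use */
static int pool_list[POOL_NSLABS];
static int pool_index;	/* where the next search begins */

/* Find a run of nslots free slots, searching from pool_index in steps of
 * stride and wrapping around once; returns the start index or -1. */
static int alloc_slots(int nslots, int stride)
{
	/* round the starting point up to a multiple of stride (like ALIGN()) */
	int index = ((pool_index + stride - 1) / stride) * stride;
	int wrap, i, count = 0;

	if (index >= POOL_NSLABS)
		index = 0;
	wrap = index;

	do {
		if (pool_list[index] >= nslots) {
			/* mark the allocated run as unavailable */
			for (i = index; i < index + nslots; i++)
				pool_list[i] = 0;
			/* shrink the free counts of the free run just before it */
			for (i = index - 1; i >= 0 && pool_list[i]; i--)
				pool_list[i] = ++count;
			/* remember where to resume searching next time */
			pool_index = (index + nslots < POOL_NSLABS)
					? index + nslots : 0;
			return index;
		}
		index += stride;
		if (index >= POOL_NSLABS)
			index = 0;
	} while (index != wrap);

	return -1;	/* pool exhausted */
}

int main(void)
{
	int i;

	/* initially every slot starts a maximal free run */
	for (i = 0; i < POOL_NSLABS; i++)
		pool_list[i] = POOL_NSLABS - i;

	printf("4 slots at %d\n", alloc_slots(4, 1));	/* expect 0 */
	printf("8 slots at %d\n", alloc_slots(8, 2));	/* expect 4 */
	return 0;
}

The kernel version additionally keeps the free counts segmented every
IO_TLB_SEGSIZE slots, which is what the OFFSET(i, IO_TLB_SEGSIZE) test in the
second for loop of the real code guards.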