author    FUJITA Tomonori <tomof@acm.org>  2008-02-05 01:28:16 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-05 12:44:12 -0500
commit    681cc5cd3efbeafca6386114070e0bfb5012e249 (patch)
tree      21249148cf78175e1d099999784053a14dc2fc50 /lib/swiotlb.c
parent    59fc67dedb46c29442989e52af39da67aea52512 (diff)
iommu sg merging: swiotlb: respect the segment boundary limits
This patch makes swiotlb avoid allocating a memory area that spans the LLD's
(low-level driver's) segment boundary. is_span_boundary() judges whether a
memory area spans the LLD's segment boundary; if map_single() finds such an
area, it tries the next available memory area.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Greg KH <greg@kroah.com>
Cc: Jeff Garzik <jeff@garzik.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
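The rule the patch enforces can be sketched outside the kernel: a buffer of
len bytes at bus address addr must not cross a (mask + 1)-aligned boundary,
where mask comes from dma_get_seg_boundary() and mask + 1 is a power of two.
A minimal userspace sketch of that rule (crosses_boundary() and the sample
addresses are illustrative, not kernel API):

#include <stdio.h>

/* Hypothetical stand-in for the rule the patch enforces: a buffer of
 * `len` bytes starting at `addr` must not cross a (mask + 1)-aligned
 * boundary.  mask + 1 is assumed to be a power of two, as with
 * dma_get_seg_boundary().
 */
static int crosses_boundary(unsigned long addr, unsigned long len,
			    unsigned long mask)
{
	/* Offset of the buffer within its (mask + 1)-sized window. */
	unsigned long offset = addr & mask;

	return offset + len > mask + 1;
}

int main(void)
{
	unsigned long mask = 0xffff;	/* 64 KB segment boundary */

	/* 0x1f000 + 0x2000 crosses the 0x20000 boundary -> 1 */
	printf("%d\n", crosses_boundary(0x1f000, 0x2000, mask));
	/* 0x1e000 + 0x2000 ends exactly at the boundary -> 0 */
	printf("%d\n", crosses_boundary(0x1e000, 0x2000, mask));
	return 0;
}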
Diffstat (limited to 'lib/swiotlb.c')
 lib/swiotlb.c | 41 +++++++++++++++++++++++++++++++++++------
 1 file changed, 35 insertions(+), 6 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 1a8050ade861..4bb5a11e18a2 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -282,6 +282,15 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 	return (addr & ~mask) != 0;
 }
 
+static inline unsigned int is_span_boundary(unsigned int index,
+					    unsigned int nslots,
+					    unsigned long offset_slots,
+					    unsigned long max_slots)
+{
+	unsigned long offset = (offset_slots + index) & (max_slots - 1);
+	return offset + nslots > max_slots;
+}
+
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
  */
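The helper above works in IO TLB slot units rather than bytes: offset_slots +
index is the candidate slot's distance from the previous segment boundary,
reduced modulo max_slots via the power-of-two mask. A quick stand-alone check
of the arithmetic (the slot counts below are assumed test inputs, not values
from the patch):

#include <stdio.h>

/* Same arithmetic as the is_span_boundary() added above, lifted out so
 * it can be exercised in userspace.  max_slots must be a power of two.
 */
static unsigned int is_span_boundary(unsigned int index, unsigned int nslots,
				     unsigned long offset_slots,
				     unsigned long max_slots)
{
	/* Slot offset of the candidate area within its boundary window. */
	unsigned long offset = (offset_slots + index) & (max_slots - 1);

	return offset + nslots > max_slots;
}

int main(void)
{
	/* Assumed inputs: a 64 KB boundary with 2 KB slots gives
	 * max_slots = 32; the IO TLB starts 4 slots past a boundary.
	 */
	unsigned long max_slots = 32, offset_slots = 4;

	/* index 24 -> window slots 28..35: spans the boundary (1). */
	printf("%u\n", is_span_boundary(24, 8, offset_slots, max_slots));
	/* index 20 -> window slots 24..31: fits exactly (0). */
	printf("%u\n", is_span_boundary(20, 8, offset_slots, max_slots));
	return 0;
}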
@@ -292,6 +301,16 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	char *dma_addr;
 	unsigned int nslots, stride, index, wrap;
 	int i;
+	unsigned long start_dma_addr;
+	unsigned long mask;
+	unsigned long offset_slots;
+	unsigned long max_slots;
+
+	mask = dma_get_seg_boundary(hwdev);
+	start_dma_addr = virt_to_bus(io_tlb_start) & mask;
+
+	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	max_slots = ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
 	/*
 	 * For mappings greater than a page, we limit the stride (and
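The byte-to-slot conversions above round up with the kernel's ALIGN() before
shifting by IO_TLB_SHIFT (11 in lib/swiotlb.c, i.e. 2 KB slots). A sketch
with ALIGN() expanded locally, using an assumed boundary mask and start
address:

#include <stdio.h>

#define IO_TLB_SHIFT	11	/* 2 KB slots, as in lib/swiotlb.c */

/* Local expansion of the kernel's ALIGN(): round x up to a multiple of
 * the power-of-two a.
 */
#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long mask = 0xffff;		/* 64 KB boundary */
	unsigned long start_dma_addr = 0x2800;	/* assumed bus address & mask */

	unsigned long offset_slots =
		ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	unsigned long max_slots =
		ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/* Prints 5 and 32: the IO TLB starts 5 slots into its 32-slot
	 * boundary window.
	 */
	printf("%lu %lu\n", offset_slots, max_slots);
	return 0;
}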
@@ -311,10 +330,17 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	 */
 	spin_lock_irqsave(&io_tlb_lock, flags);
 	{
-		wrap = index = ALIGN(io_tlb_index, stride);
-
+		index = ALIGN(io_tlb_index, stride);
 		if (index >= io_tlb_nslabs)
-			wrap = index = 0;
+			index = 0;
+
+		while (is_span_boundary(index, nslots, offset_slots,
+					max_slots)) {
+			index += stride;
+			if (index >= io_tlb_nslabs)
+				index = 0;
+		}
+		wrap = index;
 
 		do {
 			/*
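Note the ordering above: the starting index is advanced past any
boundary-spanning slot before wrap is recorded, so the while (index != wrap)
exit test in the scan loop compares against a slot the allocator could
actually return. The pattern, pulled out as a hypothetical helper
(first_fit_start() is not in the patch):

#include <stdio.h>

/* is_span_boundary() as added in the first hunk above. */
static unsigned int is_span_boundary(unsigned int index, unsigned int nslots,
				     unsigned long offset_slots,
				     unsigned long max_slots)
{
	unsigned long offset = (offset_slots + index) & (max_slots - 1);
	return offset + nslots > max_slots;
}

/* Hypothetical helper mirroring the new skip loop: advance past
 * boundary-spanning slots *before* the caller records `wrap`.
 */
static unsigned int first_fit_start(unsigned int index, unsigned int nslots,
				    unsigned int stride, unsigned int total,
				    unsigned long offset_slots,
				    unsigned long max_slots)
{
	while (is_span_boundary(index, nslots, offset_slots, max_slots)) {
		index += stride;
		if (index >= total)
			index = 0;
	}
	return index;	/* what map_single() stores in `wrap` */
}

int main(void)
{
	/* Assumed inputs: 128 total slots, 32-slot boundary windows,
	 * IO TLB aligned to a boundary (offset_slots = 0).
	 */
	unsigned int start = first_fit_start(28, 8, 1, 128, 0, 32);

	printf("%u\n", start);	/* 32: slots 28..35 would span, 32..39 fit */
	return 0;
}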
@@ -341,9 +367,12 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 
 			goto found;
 		}
-		index += stride;
-		if (index >= io_tlb_nslabs)
-			index = 0;
+		do {
+			index += stride;
+			if (index >= io_tlb_nslabs)
+				index = 0;
+		} while (is_span_boundary(index, nslots, offset_slots,
+					  max_slots));
 	} while (index != wrap);
 
 	spin_unlock_irqrestore(&io_tlb_lock, flags);