author    Keir Fraser <Keir.Fraser@cl.cam.ac.uk>  2007-07-21 07:37:24 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-21 20:49:14 -0400
commit    df336d1c7b6fd510fa6d3a028f999e7586c7026e (patch)
tree      d393ccb7b9051e8e491136f7a4a1786d5bf4f1e8 /lib
parent    28de7948a896763bc97ccd416bba5b9422158350 (diff)
Fix swiotlb_sync_single_range()
If the swiotlb maps a multi-slab region, swiotlb_sync_single_range() can be invoked to sync a sub-region which does not include the first slab. Unfortunately io_tlb_orig_addr[] is only initialised for the first slab, and hence the call to sync_single() will read a garbage orig_addr in this case.

This patch fixes the issue by initialising all mapped slabs in io_tlb_orig_addr[]. It also correctly adjusts the buffer pointer in sync_single() to handle the case that the given dma_addr is not aligned on a slab boundary.

Signed-off-by: Keir Fraser <keir.fraser@cl.cam.ac.uk>
Cc: "Luck, Tony" <tony.luck@intel.com>
Acked-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
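For illustration, the slab bookkeeping this patch corrects can be reproduced in a small stand-alone C sketch. The names mirror lib/swiotlb.c, but the slab size, pool size, and the fixed index-0 "allocator" are assumptions made for the example rather than the kernel's actual allocation logic; the sketch only shows why every covered slab needs an io_tlb_orig_addr[] entry and why a sync must re-apply the intra-slab offset.

/*
 * Stand-alone sketch (not kernel code) of the bookkeeping fixed above.
 * The bounce pool is split into slabs of (1 << IO_TLB_SHIFT) bytes and
 * io_tlb_orig_addr[] records, per slab, where the data originally lives.
 * Slab size, pool size and the fixed index-0 "allocator" are illustrative.
 */
#include <stdio.h>
#include <string.h>

#define IO_TLB_SHIFT	11				/* 2 KB slabs, as in swiotlb */
#define IO_TLB_SLABS	8				/* tiny pool for the example */
#define IO_TLB_SIZE	(IO_TLB_SLABS << IO_TLB_SHIFT)

static char  io_tlb[IO_TLB_SIZE];			/* bounce buffer pool */
static char *io_tlb_orig_addr[IO_TLB_SLABS];		/* per-slab original address */

/* map_single(): record the original address for *every* slab covered. */
static char *map_single(char *buffer, size_t size)
{
	int index = 0;					/* assume slots 0..nslots-1 were allocated */
	int nslots = (size + (1 << IO_TLB_SHIFT) - 1) >> IO_TLB_SHIFT;
	int i;

	for (i = 0; i < nslots; i++)			/* first half of the fix */
		io_tlb_orig_addr[index + i] = buffer + (i << IO_TLB_SHIFT);
	memcpy(io_tlb, buffer, size);
	return io_tlb;
}

/* sync_single(SYNC_FOR_CPU): find the owning slab, then re-apply the offset. */
static void sync_single_for_cpu(char *dma_addr, size_t size)
{
	int index = (dma_addr - io_tlb) >> IO_TLB_SHIFT;
	char *buffer = io_tlb_orig_addr[index];

	/*
	 * dma_addr need not be slab-aligned: without this adjustment a
	 * sub-range sync would copy back to the slab's start instead of
	 * the requested offset.  The kernel masks the absolute address
	 * because io_tlb_start is slab-aligned; the sketch masks the
	 * offset from the pool start, which is equivalent under that
	 * alignment assumption.
	 */
	buffer += (dma_addr - io_tlb) & ((1 << IO_TLB_SHIFT) - 1);
	memcpy(buffer, dma_addr, size);
}

int main(void)
{
	static char orig[3 << IO_TLB_SHIFT];		/* spans three slabs */
	char *bounce = map_single(orig, sizeof(orig));

	/*
	 * Sync a sub-range that starts 64 bytes into the second slab:
	 * before the fix, io_tlb_orig_addr[1] would be uninitialised.
	 */
	sync_single_for_cpu(bounce + (1 << IO_TLB_SHIFT) + 64, 128);
	printf("synced sub-range via slab 1\n");
	return 0;
}

Running the sketch copies the 128-byte sub-range back to orig + 2112 rather than to the start of the region, which is exactly the case the unpatched sync_single() got wrong.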
Diffstat (limited to 'lib')
 lib/swiotlb.c | 5 +++--
 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 10c13ad0d82d..a7381d55663a 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -357,7 +357,8 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	 * This is needed when we sync the memory.  Then we sync the buffer if
 	 * needed.
 	 */
-	io_tlb_orig_addr[index] = buffer;
+	for (i = 0; i < nslots; i++)
+		io_tlb_orig_addr[index+i] = buffer + (i << IO_TLB_SHIFT);
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
 		memcpy(dma_addr, buffer, size);
 
@@ -418,6 +419,8 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	char *buffer = io_tlb_orig_addr[index];
 
+	buffer += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
+
 	switch (target) {
 	case SYNC_FOR_CPU:
 		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))