author    John W. Linville <linville@tuxdriver.com>    2005-09-29 17:44:57 -0400
committer Tony Luck <tony.luck@intel.com>    2005-09-29 17:44:57 -0400
commit    de69e0f0b38a467d881e138a302b005bf31c8400 (patch)
tree      cce186614aafa090de860c65855c90600b300c56
parent    878a97cfd7014b01285db09f52f9881ffe4cb608 (diff)
[PATCH] swiotlb: support syncing DMA_BIDIRECTIONAL mappings
The current implementation of sync_single in swiotlb.c chokes on DMA_BIDIRECTIONAL mappings. This patch adds the capability to sync those mappings, and optimizes other syncs by accounting for the sync target (i.e. cpu or device) in addition to the DMA direction of the mapping.

Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
 lib/swiotlb.c | 62 ++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 40 insertions(+), 22 deletions(-)
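Before this change, sync_single() fell through to its BUG() for a DMA_BIDIRECTIONAL mapping. For context, the driver-side pattern that now works on a bounce-buffered machine looks roughly like the sketch below; bidir_dma_cycle(), process_buffer(), and the dev/buf arguments are hypothetical illustrations, not part of the patch, and error handling is elided:

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Hypothetical CPU-side work on the bounced data. */
static void process_buffer(void *buf, size_t len)
{
        memset(buf, 0, len);
}

static void bidir_dma_cycle(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        /* One mapping carries traffic in both directions. */
        handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);

        /* ... device DMA runs here ... */

        /*
         * Hand the buffer to the CPU; swiotlb bounces the device's
         * data back into the original buffer.
         */
        dma_sync_single_for_cpu(dev, handle, len, DMA_BIDIRECTIONAL);
        process_buffer(buf, len);

        /*
         * Hand it back to the device; swiotlb bounces the CPU's data
         * out to the bounce buffer.
         */
        dma_sync_single_for_device(dev, handle, len, DMA_BIDIRECTIONAL);

        /* ... more device DMA ... */

        dma_unmap_single(dev, handle, len, DMA_BIDIRECTIONAL);
}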
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 566791b3f583..a1a6d6bf87b4 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -58,6 +58,14 @@
  */
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 
+/*
+ * Enumeration for sync targets
+ */
+enum dma_sync_target {
+        SYNC_FOR_CPU = 0,
+        SYNC_FOR_DEVICE = 1,
+};
+
 int swiotlb_force;
 
 /*
@@ -397,21 +405,28 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 }
 
 static void
-sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+sync_single(struct device *hwdev, char *dma_addr, size_t size,
+            int dir, int target)
 {
         int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
         char *buffer = io_tlb_orig_addr[index];
 
-        /*
-         * bounce... copy the data back into/from the original buffer
-         * XXX How do you handle DMA_BIDIRECTIONAL here ?
-         */
-        if (dir == DMA_FROM_DEVICE)
-                memcpy(buffer, dma_addr, size);
-        else if (dir == DMA_TO_DEVICE)
-                memcpy(dma_addr, buffer, size);
-        else
+        switch (target) {
+        case SYNC_FOR_CPU:
+                if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
+                        memcpy(buffer, dma_addr, size);
+                else if (dir != DMA_TO_DEVICE)
+                        BUG();
+                break;
+        case SYNC_FOR_DEVICE:
+                if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
+                        memcpy(dma_addr, buffer, size);
+                else if (dir != DMA_FROM_DEVICE)
+                        BUG();
+                break;
+        default:
                 BUG();
+        }
 }
 
 void *
@@ -596,14 +611,14 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
  */
 static inline void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
-                    size_t size, int dir)
+                    size_t size, int dir, int target)
 {
         char *dma_addr = phys_to_virt(dev_addr);
 
         if (dir == DMA_NONE)
                 BUG();
         if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-                sync_single(hwdev, dma_addr, size, dir);
+                sync_single(hwdev, dma_addr, size, dir, target);
         else if (dir == DMA_FROM_DEVICE)
                 mark_clean(dma_addr, size);
 }
@@ -612,14 +627,14 @@ void
 swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                             size_t size, int dir)
 {
-        swiotlb_sync_single(hwdev, dev_addr, size, dir);
+        swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
 }
 
 void
 swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                size_t size, int dir)
 {
-        swiotlb_sync_single(hwdev, dev_addr, size, dir);
+        swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
 }
 
 /*
@@ -627,14 +642,15 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
  */
 static inline void
 swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
-                          unsigned long offset, size_t size, int dir)
+                          unsigned long offset, size_t size,
+                          int dir, int target)
 {
         char *dma_addr = phys_to_virt(dev_addr) + offset;
 
         if (dir == DMA_NONE)
                 BUG();
         if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-                sync_single(hwdev, dma_addr, size, dir);
+                sync_single(hwdev, dma_addr, size, dir, target);
         else if (dir == DMA_FROM_DEVICE)
                 mark_clean(dma_addr, size);
 }
@@ -643,14 +659,16 @@ void
 swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                                   unsigned long offset, size_t size, int dir)
 {
-        swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir);
+        swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
+                                  SYNC_FOR_CPU);
 }
 
 void
 swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                      unsigned long offset, size_t size, int dir)
 {
-        swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir);
+        swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
+                                  SYNC_FOR_DEVICE);
 }
 
 /*
@@ -729,7 +747,7 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
  */
 static inline void
 swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
-                int nelems, int dir)
+                int nelems, int dir, int target)
 {
         int i;
 
@@ -739,21 +757,21 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
         for (i = 0; i < nelems; i++, sg++)
                 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                         sync_single(hwdev, (void *) sg->dma_address,
-                                    sg->dma_length, dir);
+                                    sg->dma_length, dir, target);
 }
 
 void
 swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                         int nelems, int dir)
 {
-        swiotlb_sync_sg(hwdev, sg, nelems, dir);
+        swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
 }
 
 void
 swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                         int nelems, int dir)
 {
-        swiotlb_sync_sg(hwdev, sg, nelems, dir);
+        swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }
 
 int
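The scatterlist wrappers above follow the same for_cpu/for_device split. For reference, a hypothetical driver-side sketch of the matching generic DMA API calls (the device and scatterlist setup are assumptions, not from this patch; per DMA-API convention, the sync and unmap calls take the original nents, not dma_map_sg()'s return value):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void bidir_sg_cycle(struct device *dev, struct scatterlist *sgl,
                           int nents)
{
        /* Map once for traffic in both directions; returns 0 on failure. */
        if (!dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL))
                return;

        /* ... device DMA ... */

        /* Give the list to the CPU, touch it, give it back. */
        dma_sync_sg_for_cpu(dev, sgl, nents, DMA_BIDIRECTIONAL);
        /* ... CPU-side work on the buffers ... */
        dma_sync_sg_for_device(dev, sgl, nents, DMA_BIDIRECTIONAL);

        /* ... more device DMA ... */

        dma_unmap_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
}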