 lib/swiotlb.c | 62 ++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 40 insertions(+), 22 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 566791b3f583..a1a6d6bf87b4 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -58,6 +58,14 @@
  */
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 
+/*
+ * Enumeration for sync targets
+ */
+enum dma_sync_target {
+        SYNC_FOR_CPU = 0,
+        SYNC_FOR_DEVICE = 1,
+};
+
 int swiotlb_force;
 
 /*
@@ -397,21 +405,28 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 }
 
 static void
-sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+sync_single(struct device *hwdev, char *dma_addr, size_t size,
+            int dir, int target)
 {
         int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
         char *buffer = io_tlb_orig_addr[index];
 
-        /*
-         * bounce... copy the data back into/from the original buffer
-         * XXX How do you handle DMA_BIDIRECTIONAL here ?
-         */
-        if (dir == DMA_FROM_DEVICE)
-                memcpy(buffer, dma_addr, size);
-        else if (dir == DMA_TO_DEVICE)
-                memcpy(dma_addr, buffer, size);
-        else
-                BUG();
+        switch (target) {
+        case SYNC_FOR_CPU:
+                if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
+                        memcpy(buffer, dma_addr, size);
+                else if (dir != DMA_TO_DEVICE)
+                        BUG();
+                break;
+        case SYNC_FOR_DEVICE:
+                if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
+                        memcpy(dma_addr, buffer, size);
+                else if (dir != DMA_FROM_DEVICE)
+                        BUG();
+                break;
+        default:
+                BUG();
+        }
 }
 
 void *
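The switch above is the heart of the change: the copy direction now depends on which side is about to touch the buffer (the target), with dir only validating the mapping. A minimal user-space sketch of the same decision logic, using the illustrative names orig, bounce, and bounce_sync, which are not part of the patch:

        #include <string.h>

        enum dma_sync_target { SYNC_FOR_CPU = 0, SYNC_FOR_DEVICE = 1 };

        /* orig stands in for the driver's buffer, bounce for the swiotlb slot. */
        static void bounce_sync(char *orig, char *bounce, size_t size,
                                enum dma_sync_target target)
        {
                if (target == SYNC_FOR_CPU)
                        memcpy(orig, bounce, size);   /* device wrote, CPU will read */
                else
                        memcpy(bounce, orig, size);   /* CPU wrote, device will read */
        }

For a DMA_BIDIRECTIONAL mapping both branches are legal at different points in the buffer's lifetime, which is exactly what the removed dir-only test (and its "XXX How do you handle DMA_BIDIRECTIONAL here ?" comment) could not express.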
@@ -596,14 +611,14 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
  */
 static inline void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
-                    size_t size, int dir)
+                    size_t size, int dir, int target)
 {
         char *dma_addr = phys_to_virt(dev_addr);
 
         if (dir == DMA_NONE)
                 BUG();
         if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-                sync_single(hwdev, dma_addr, size, dir);
+                sync_single(hwdev, dma_addr, size, dir, target);
         else if (dir == DMA_FROM_DEVICE)
                 mark_clean(dma_addr, size);
 }
@@ -612,14 +627,14 @@ void
 swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                             size_t size, int dir)
 {
-        swiotlb_sync_single(hwdev, dev_addr, size, dir);
+        swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
 }
 
 void
 swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                size_t size, int dir)
 {
-        swiotlb_sync_single(hwdev, dev_addr, size, dir);
+        swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
 }
 
 /*
@@ -627,14 +642,15 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
  */
 static inline void
 swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
-                          unsigned long offset, size_t size, int dir)
+                          unsigned long offset, size_t size,
+                          int dir, int target)
 {
         char *dma_addr = phys_to_virt(dev_addr) + offset;
 
         if (dir == DMA_NONE)
                 BUG();
         if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-                sync_single(hwdev, dma_addr, size, dir);
+                sync_single(hwdev, dma_addr, size, dir, target);
         else if (dir == DMA_FROM_DEVICE)
                 mark_clean(dma_addr, size);
 }
@@ -643,14 +659,16 @@ void
 swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                                   unsigned long offset, size_t size, int dir)
 {
-        swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir);
+        swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
+                                  SYNC_FOR_CPU);
 }
 
 void
 swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                      unsigned long offset, size_t size, int dir)
 {
-        swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir);
+        swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
+                                  SYNC_FOR_DEVICE);
 }
 
 /*
@@ -729,7 +747,7 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
  */
 static inline void
 swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
-                int nelems, int dir)
+                int nelems, int dir, int target)
 {
         int i;
 
@@ -739,21 +757,21 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
         for (i = 0; i < nelems; i++, sg++)
                 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                         sync_single(hwdev, (void *) sg->dma_address,
-                                    sg->dma_length, dir);
+                                    sg->dma_length, dir, target);
 }
 
 void
 swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                         int nelems, int dir)
 {
-        swiotlb_sync_sg(hwdev, sg, nelems, dir);
+        swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
 }
 
 void
 swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                            int nelems, int dir)
 {
-        swiotlb_sync_sg(hwdev, sg, nelems, dir);
+        swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }
 
 int
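For context, a hedged usage sketch of how a caller would pair the two sync targets on a single DMA_BIDIRECTIONAL mapping; process_buffer and its arguments are hypothetical, assumed to come from an earlier swiotlb_map_single() call:

        static void process_buffer(struct device *hwdev, dma_addr_t bus_addr,
                                   size_t len)
        {
                /* Device finished writing: copy the bounce slot back for the CPU. */
                swiotlb_sync_single_for_cpu(hwdev, bus_addr, len, DMA_BIDIRECTIONAL);

                /* ... CPU examines and updates the buffer here ... */

                /* Hand it back: push the CPU's updates out to the bounce slot. */
                swiotlb_sync_single_for_device(hwdev, bus_addr, len, DMA_BIDIRECTIONAL);
        }

Before this patch both calls funnelled into the same dir-only test in sync_single(), so a DMA_BIDIRECTIONAL buffer fell through to BUG(); with the target parameter each call copies in the correct direction.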