author     Alexander Duyck <alexander.h.duyck@intel.com>   2012-10-15 13:19:44 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2012-10-30 09:32:07 -0400
commit     61ca08c3220032dd88815b3465d56cb779258168 (patch)
tree       b126015d4fd76ff1e4b936097877e734aa19746b /lib/swiotlb.c
parent     e05ed4d1fad9e730995abb08cb9bc3bffac5018b (diff)
swiotlb: Use physical addresses for swiotlb_tbl_unmap_single
This change makes the unmap functionality use physical addresses as well.
This helps to further reduce the use of the virt_to_phys and phys_to_virt
functions.
In order to clarify things, since we now have two physical addresses in use
inside of swiotlb_tbl_unmap_single, I am renaming phys to orig_addr and
dma_addr to tlb_addr. This way it should be clear that orig_addr is an
address taken from the io_tlb_orig_addr array and tlb_addr is an address
within the io_tlb bounce buffer.
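
For illustration only (not part of the original commit text), the calling
convention change means callers that already hold a phys_addr_t can drop
the address conversion entirely:

	/* before: callers had to materialize a virtual address */
	swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);

	/* after: the physical address is passed straight through */
	swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);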
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'lib/swiotlb.c')
-rw-r--r--  lib/swiotlb.c | 37
1 file changed, 19 insertions(+), 18 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 3adc148bb8d8..d7701dcf407f 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -515,20 +515,20 @@ phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size,
 /*
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
-void
-swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
-			 enum dma_data_direction dir)
+void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
+			      size_t size, enum dma_data_direction dir)
 {
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-	int index = (dma_addr - (char *)phys_to_virt(io_tlb_start)) >> IO_TLB_SHIFT;
-	phys_addr_t phys = io_tlb_orig_addr[index];
+	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+	phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
 	/*
 	 * First, sync the memory before unmapping the entry
 	 */
-	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
-		swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
+	if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+		swiotlb_bounce(orig_addr, phys_to_virt(tlb_addr),
+			       size, DMA_FROM_DEVICE);
 
 	/*
 	 * Return the buffer to the free list by setting the corresponding
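
A sketch of the new slot arithmetic (illustrative, assuming IO_TLB_SHIFT is
11, i.e. 2 KB slabs, as defined in lib/swiotlb.c): with phys_addr_t values
the index falls out of plain integer math, and a kernel virtual address is
only materialized at the last moment for the bounce copy itself:

	phys_addr_t tlb_addr = io_tlb_start + (3 << IO_TLB_SHIFT);
	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;	/* index == 3 */

	/* only the memcpy inside swiotlb_bounce() still needs a virtual
	 * address, hence the phys_to_virt(tlb_addr) at the call site */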
@@ -621,17 +621,18 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 
 		ret = phys_to_virt(paddr);
 		dev_addr = phys_to_dma(hwdev, paddr);
-	}
 
-	/* Confirm address can be DMA'd by device */
-	if (dev_addr + size - 1 > dma_mask) {
-		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-		       (unsigned long long)dma_mask,
-		       (unsigned long long)dev_addr);
+		/* Confirm address can be DMA'd by device */
+		if (dev_addr + size - 1 > dma_mask) {
+			printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
+			       (unsigned long long)dma_mask,
+			       (unsigned long long)dev_addr);
 
-		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
-		return NULL;
+			/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+			swiotlb_tbl_unmap_single(hwdev, paddr,
+						 size, DMA_TO_DEVICE);
+			return NULL;
+		}
 	}
 
 	*dma_handle = dev_addr;
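
Editorial context, not part of the patch: since the parent commit
(e05ed4d1fad9) made map_single() return a phys_addr_t, the failed-mask
error path can hand that same paddr back to swiotlb_tbl_unmap_single()
instead of round-tripping through ret. A minimal sketch of the reshaped
path (error handling elided):

	phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
	void *ret = phys_to_virt(paddr);
	dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);

	if (dev_addr + size - 1 > dma_mask) {
		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE);
		return NULL;
	}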
@@ -652,7 +653,7 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 		free_pages((unsigned long)vaddr, get_order(size));
 	else
 		/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
-		swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
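
For context (the surrounding lines are not shown in the hunk): paddr in
swiotlb_free_coherent() is derived from the DMA handle near the top of the
function, roughly:

	/* earlier in swiotlb_free_coherent(), outside this hunk */
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

so the is_swiotlb_buffer() check and the unmap both operate on the same
physical address, with no virt_to_phys in between.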
@@ -716,7 +717,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 
 	/* Ensure that the address returned is DMA'ble */
 	if (!dma_capable(dev, dev_addr, size)) {
-		swiotlb_tbl_unmap_single(dev, phys_to_virt(map), size, dir);
+		swiotlb_tbl_unmap_single(dev, map, size, dir);
 		return phys_to_dma(dev, io_tlb_overflow_buffer);
 	}
 
@@ -740,7 +741,7 @@ static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 	BUG_ON(dir == DMA_NONE);
 
 	if (is_swiotlb_buffer(paddr)) {
-		swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
 		return;
 	}
 