author		Ian Campbell <ian.campbell@citrix.com>		2008-12-16 15:17:30 -0500
committer	Ingo Molnar <mingo@elte.hu>			2008-12-17 12:58:09 -0500
commit		e08e1f7adba522378e8d2ae941bf25443866136d (patch)
tree		4410e414d856dc03d77a0c654d6b9672f6e69f36
parent		a5ddde4a558b3bd1e3dc50e274a0db2ea7a8fd06 (diff)
swiotlb: allow architectures to override phys<->bus<->phys conversions
Impact: generalize phys<->bus<->phys conversions in the swiotlb code
Architectures may need to override these conversions. Implement __weak
hook points containing the default (identity) implementations.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	include/linux/swiotlb.h |  3
-rw-r--r--	lib/swiotlb.c           | 52
2 files changed, 39 insertions(+), 16 deletions(-)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 58b996a642f9..694f1839cbc0 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -27,6 +27,9 @@ swiotlb_init(void);
 extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
 extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
 
+extern dma_addr_t swiotlb_phys_to_bus(phys_addr_t address);
+extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address);
+
 extern void
 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			dma_addr_t *dma_handle, gfp_t flags);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 1272b23e4769..3494263cdd9a 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -125,6 +125,26 @@ void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
 	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
 }
 
+dma_addr_t __weak swiotlb_phys_to_bus(phys_addr_t paddr)
+{
+	return paddr;
+}
+
+phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
+{
+	return baddr;
+}
+
+static dma_addr_t swiotlb_virt_to_bus(volatile void *address)
+{
+	return swiotlb_phys_to_bus(virt_to_phys(address));
+}
+
+static void *swiotlb_bus_to_virt(dma_addr_t address)
+{
+	return phys_to_virt(swiotlb_bus_to_phys(address));
+}
+
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
@@ -168,7 +188,7 @@ swiotlb_init_with_default_size(size_t default_size)
 		panic("Cannot allocate SWIOTLB overflow buffer!\n");
 
 	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
-	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
+	       swiotlb_virt_to_bus(io_tlb_start), swiotlb_virt_to_bus(io_tlb_end));
 }
 
 void __init
@@ -250,7 +270,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
 
 	printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
 	       "0x%lx\n", bytes >> 20,
-	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
+	       swiotlb_virt_to_bus(io_tlb_start), swiotlb_virt_to_bus(io_tlb_end));
 
 	return 0;
 
@@ -298,7 +318,7 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	unsigned long max_slots;
 
 	mask = dma_get_seg_boundary(hwdev);
-	start_dma_addr = virt_to_bus(io_tlb_start) & mask;
+	start_dma_addr = swiotlb_virt_to_bus(io_tlb_start) & mask;
 
 	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
@@ -475,7 +495,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) {
+	if (ret && !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(ret), size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -496,7 +516,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	}
 
 	memset(ret, 0, size);
-	dev_addr = virt_to_bus(ret);
+	dev_addr = swiotlb_virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
 	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
@@ -556,7 +576,7 @@ dma_addr_t
 swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 			 int dir, struct dma_attrs *attrs)
 {
-	dma_addr_t dev_addr = virt_to_bus(ptr);
+	dma_addr_t dev_addr = swiotlb_virt_to_bus(ptr);
 	void *map;
 
 	BUG_ON(dir == DMA_NONE);
@@ -577,7 +597,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 		map = io_tlb_overflow_buffer;
 	}
 
-	dev_addr = virt_to_bus(map);
+	dev_addr = swiotlb_virt_to_bus(map);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
@@ -607,7 +627,7 @@ void
 swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
 			   size_t size, int dir, struct dma_attrs *attrs)
 {
-	char *dma_addr = bus_to_virt(dev_addr);
+	char *dma_addr = swiotlb_bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 	if (is_swiotlb_buffer(dma_addr))
@@ -637,7 +657,7 @@ static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 		    size_t size, int dir, int target)
 {
-	char *dma_addr = bus_to_virt(dev_addr);
+	char *dma_addr = swiotlb_bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 	if (is_swiotlb_buffer(dma_addr))
@@ -668,7 +688,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 			  unsigned long offset, size_t size,
 			  int dir, int target)
 {
-	char *dma_addr = bus_to_virt(dev_addr) + offset;
+	char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;
 
 	BUG_ON(dir == DMA_NONE);
 	if (is_swiotlb_buffer(dma_addr))
@@ -724,7 +744,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 
 	for_each_sg(sgl, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
-		dev_addr = virt_to_bus(addr);
+		dev_addr = swiotlb_virt_to_bus(addr);
 		if (swiotlb_force ||
 		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
 			void *map = map_single(hwdev, addr, sg->length, dir);
@@ -737,7 +757,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 				sgl[0].dma_length = 0;
 				return 0;
 			}
-			sg->dma_address = virt_to_bus(map);
+			sg->dma_address = swiotlb_virt_to_bus(map);
 		} else
 			sg->dma_address = dev_addr;
 		sg->dma_length = sg->length;
@@ -768,7 +788,7 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 
 	for_each_sg(sgl, sg, nelems, i) {
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-			unmap_single(hwdev, bus_to_virt(sg->dma_address),
+			unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
 				     sg->dma_length, dir);
 		else if (dir == DMA_FROM_DEVICE)
 			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
@@ -801,7 +821,7 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
 
 	for_each_sg(sgl, sg, nelems, i) {
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-			sync_single(hwdev, bus_to_virt(sg->dma_address),
+			sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
 				    sg->dma_length, dir, target);
 		else if (dir == DMA_FROM_DEVICE)
 			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
@@ -825,7 +845,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
-	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
+	return (dma_addr == swiotlb_virt_to_bus(io_tlb_overflow_buffer));
 }
 
 /*
@@ -837,7 +857,7 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return virt_to_bus(io_tlb_end - 1) <= mask;
+	return swiotlb_virt_to_bus(io_tlb_end - 1) <= mask;
 }
 
 EXPORT_SYMBOL(swiotlb_map_single);
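
For a sense of how the new hooks are meant to be used: an architecture whose
bus (device) addresses differ from CPU physical addresses can provide strong
definitions of swiotlb_phys_to_bus() and swiotlb_bus_to_phys(), which the
linker picks over the __weak identity defaults added above. Below is a minimal
sketch, assuming a hypothetical platform where the two address spaces differ
by a fixed offset; the file path and the FOO_BUS_OFFSET constant are
illustrative only, not part of this patch.

/* arch/foo/kernel/dma.c (hypothetical example, not part of this patch) */
#include <linux/types.h>
#include <linux/swiotlb.h>

/* Made-up fixed translation between CPU physical and bus addresses. */
#define FOO_BUS_OFFSET	0x80000000UL

/* Strong definitions override the __weak defaults in lib/swiotlb.c. */
dma_addr_t swiotlb_phys_to_bus(phys_addr_t paddr)
{
	return paddr + FOO_BUS_OFFSET;
}

phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr)
{
	return baddr - FOO_BUS_OFFSET;
}

Because swiotlb_virt_to_bus() and swiotlb_bus_to_virt() are composed from
these two hooks, every call site converted by this patch picks up the
architecture's translation automatically.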