diff options
author | Ian Campbell <ian.campbell@citrix.com> | 2008-12-16 15:17:31 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-12-17 12:58:13 -0500 |
commit | b81ea27b2329bf44b30c427800954f845896d476 (patch) | |
tree | 84bbfc80477c3561ee601c1c05dcc0917ced196b /lib | |
parent | e08e1f7adba522378e8d2ae941bf25443866136d (diff) |
swiotlb: add arch hook to force mapping
Impact: generalize the sw-IOTLB range checks
Some architectures require special rules to determine whether a range
needs mapping or not. This adds a weak function for architectures to
override.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'lib')
-rw-r--r-- | lib/swiotlb.c | 15 |
1 file changed, 13 insertions, 2 deletions
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 3494263cdd9a..d8b09051c455 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -145,6 +145,11 @@ static void *swiotlb_bus_to_virt(dma_addr_t address) | |||
145 | return phys_to_virt(swiotlb_bus_to_phys(address)); | 145 | return phys_to_virt(swiotlb_bus_to_phys(address)); |
146 | } | 146 | } |
147 | 147 | ||
148 | int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size) | ||
149 | { | ||
150 | return 0; | ||
151 | } | ||
152 | |||
148 | /* | 153 | /* |
149 | * Statically reserve bounce buffer space and initialize bounce buffer data | 154 | * Statically reserve bounce buffer space and initialize bounce buffer data |
150 | * structures for the software IO TLB used to implement the DMA API. | 155 | * structures for the software IO TLB used to implement the DMA API. |
@@ -297,6 +302,11 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size) | |||
297 | return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); | 302 | return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); |
298 | } | 303 | } |
299 | 304 | ||
305 | static inline int range_needs_mapping(void *ptr, size_t size) | ||
306 | { | ||
307 | return swiotlb_force || swiotlb_arch_range_needs_mapping(ptr, size); | ||
308 | } | ||
309 | |||
300 | static int is_swiotlb_buffer(char *addr) | 310 | static int is_swiotlb_buffer(char *addr) |
301 | { | 311 | { |
302 | return addr >= io_tlb_start && addr < io_tlb_end; | 312 | return addr >= io_tlb_start && addr < io_tlb_end; |
@@ -585,7 +595,8 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, | |||
585 | * we can safely return the device addr and not worry about bounce | 595 | * we can safely return the device addr and not worry about bounce |
586 | * buffering it. | 596 | * buffering it. |
587 | */ | 597 | */ |
588 | if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force) | 598 | if (!address_needs_mapping(hwdev, dev_addr, size) && |
599 | !range_needs_mapping(ptr, size)) | ||
589 | return dev_addr; | 600 | return dev_addr; |
590 | 601 | ||
591 | /* | 602 | /* |
@@ -745,7 +756,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, | |||
745 | for_each_sg(sgl, sg, nelems, i) { | 756 | for_each_sg(sgl, sg, nelems, i) { |
746 | addr = SG_ENT_VIRT_ADDRESS(sg); | 757 | addr = SG_ENT_VIRT_ADDRESS(sg); |
747 | dev_addr = swiotlb_virt_to_bus(addr); | 758 | dev_addr = swiotlb_virt_to_bus(addr); |
748 | if (swiotlb_force || | 759 | if (range_needs_mapping(sg_virt(sg), sg->length) || |
749 | address_needs_mapping(hwdev, dev_addr, sg->length)) { | 760 | address_needs_mapping(hwdev, dev_addr, sg->length)) { |
750 | void *map = map_single(hwdev, addr, sg->length, dir); | 761 | void *map = map_single(hwdev, addr, sg->length, dir); |
751 | if (!map) { | 762 | if (!map) { |