author	FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2009-07-09 21:04:58 -0400
committer	FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2009-07-28 01:19:19 -0400
commit	b9394647ac88faad9db0f9e92eac4db434faded6 (patch)
tree	23f44b8c06baedad1183afc9be95fb2a12e183bb
parent	9a937c91eea31c4b594ea49a2a23c57003e04987 (diff)
swiotlb: use dma_capable()
This converts swiotlb to use dma_capable() instead of
swiotlb_arch_address_needs_mapping() and is_buffer_dma_capable().
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Becky Bruce <beckyb@kernel.crashing.org>
-rw-r--r--	lib/swiotlb.c	24
1 file changed, 5 insertions(+), 19 deletions(-)
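For reference, dma_capable() asks whether a bus address range is reachable under the device's streaming DMA mask. The helper is not part of this patch; what follows is a minimal sketch of its approximate definition in kernels of this era (the real one lives in the architecture's DMA mapping headers):

/*
 * Sketch only, not from this patch: approximate dma_capable()
 * as it existed when this conversion was made.
 */
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;			/* no mask set: device cannot DMA at all */

	return addr + size <= *dev->dma_mask;	/* whole buffer lies below the mask */
}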
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index cba3db809fbc..a012d93792b7 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -131,12 +131,6 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 	return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
 }
 
-int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
-					      dma_addr_t addr, size_t size)
-{
-	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
-}
-
 static void swiotlb_print_info(unsigned long bytes)
 {
 	phys_addr_t pstart, pend;
@@ -297,12 +291,6 @@ cleanup1:
 	return -ENOMEM;
 }
 
-static inline int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
-{
-	return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
-}
-
 static int is_swiotlb_buffer(phys_addr_t paddr)
 {
 	return paddr >= virt_to_phys(io_tlb_start) &&
@@ -539,9 +527,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret &&
-	    !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
-				   size)) {
+	if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 */
@@ -563,7 +549,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dev_addr = swiotlb_virt_to_bus(hwdev, ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
+	if (dev_addr + size > dma_mask) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
 		       (unsigned long long)dma_mask,
 		       (unsigned long long)dev_addr);
@@ -635,7 +621,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (!address_needs_mapping(dev, dev_addr, size) && !swiotlb_force)
+	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
 		return dev_addr;
 
 	/*
@@ -652,7 +638,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (address_needs_mapping(dev, dev_addr, size))
+	if (!dma_capable(dev, dev_addr, size))
 		panic("map_single: bounce buffer is not DMA'ble");
 
 	return dev_addr;
@@ -805,7 +791,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 		dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);
 
 		if (swiotlb_force ||
-		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
+		    !dma_capable(hwdev, dev_addr, sg->length)) {
 			void *map = map_single(hwdev, sg_phys(sg),
 					       sg->length, dir);
 			if (!map) {
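Note that swiotlb_alloc_coherent() is the one place the patch does not switch to dma_capable(): coherent allocations must be checked against hwdev->coherent_dma_mask, while dma_capable() consults the streaming dev->dma_mask, so the equivalent range test stays open-coded there. A sketch of that path, with the surrounding lines paraphrased from the function of this era:

	u64 dma_mask = DMA_BIT_MASK(32);	/* fallback when no device is given */

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* dma_capable() would test dev->dma_mask; coherent allocations
	 * must honour the coherent mask instead, hence the open coding. */
	if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
		free_pages((unsigned long)ret, order);	/* unreachable: retry via the bounce pool */
		ret = NULL;
	}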