author:    Jeremy Fitzhardinge <jeremy@goop.org>  2008-12-22 13:26:05 -0500
committer: Ingo Molnar <mingo@elte.hu>            2008-12-28 03:54:52 -0500
commit:    70a7d3cc1308a55104fbe505d76f2aca8a4cf53e (patch)
tree:      2e4542f668d8b431140f0682c9d1aebbe7bf30eb /lib
parent:    a08636690d06b2e36cfb4c2b3ee133a81c47e1e0 (diff)
swiotlb: add hwdev to swiotlb_phys_to_bus() / swiotlb_sg_to_bus()
Impact: extend functions with a (yet unused) parameter, update callsites
Some architectures need the device in order to translate physical addresses to bus addresses - in preparation for highmem swiotlb.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
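
For context, here is a minimal, hypothetical sketch of why the extra argument matters: once the __weak swiotlb_phys_to_bus() hook receives the device, an architecture override can make the physical-to-bus conversion depend on the device. The names arch_device_needs_bus_offset() and ARCH_BUS_OFFSET are invented for illustration only; they are not part of this patch or of the kernel.

/*
 * Hypothetical arch override of the __weak hook extended by this patch.
 * arch_device_needs_bus_offset() and ARCH_BUS_OFFSET are made-up names
 * standing in for whatever per-device translation an arch really needs.
 */
#include <linux/types.h>
#include <linux/device.h>

dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
{
	if (hwdev && arch_device_needs_bus_offset(hwdev))
		return paddr + ARCH_BUS_OFFSET;	/* device-specific bus view */

	return paddr;				/* identity mapping otherwise */
}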
Diffstat (limited to 'lib')
-rw-r--r--	lib/swiotlb.c	53
1 file changed, 22 insertions(+), 31 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index fa2dc4e5f9ba..3657da8ebbc3 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -126,7 +126,7 @@ void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
 	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
 }
 
-dma_addr_t __weak swiotlb_phys_to_bus(phys_addr_t paddr)
+dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
 {
 	return paddr;
 }
@@ -136,9 +136,10 @@ phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
 	return baddr;
 }
 
-static dma_addr_t swiotlb_virt_to_bus(volatile void *address)
+static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
+				      volatile void *address)
 {
-	return swiotlb_phys_to_bus(virt_to_phys(address));
+	return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
 }
 
 static void *swiotlb_bus_to_virt(dma_addr_t address)
@@ -151,35 +152,23 @@ int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
 	return 0;
 }
 
-static dma_addr_t swiotlb_sg_to_bus(struct scatterlist *sg)
+static dma_addr_t swiotlb_sg_to_bus(struct device *hwdev, struct scatterlist *sg)
 {
-	return swiotlb_phys_to_bus(page_to_phys(sg_page(sg)) + sg->offset);
+	return swiotlb_phys_to_bus(hwdev, page_to_phys(sg_page(sg)) + sg->offset);
 }
 
 static void swiotlb_print_info(unsigned long bytes)
 {
 	phys_addr_t pstart, pend;
-	dma_addr_t bstart, bend;
 
 	pstart = virt_to_phys(io_tlb_start);
 	pend = virt_to_phys(io_tlb_end);
 
-	bstart = swiotlb_phys_to_bus(pstart);
-	bend = swiotlb_phys_to_bus(pend);
-
 	printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
 	       bytes >> 20, io_tlb_start, io_tlb_end);
-	if (pstart != bstart || pend != bend)
-		printk(KERN_INFO "software IO TLB at phys %#llx - %#llx"
-		       " bus %#llx - %#llx\n",
-		       (unsigned long long)pstart,
-		       (unsigned long long)pend,
-		       (unsigned long long)bstart,
-		       (unsigned long long)bend);
-	else
-		printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
-		       (unsigned long long)pstart,
-		       (unsigned long long)pend);
+	printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
+	       (unsigned long long)pstart,
+	       (unsigned long long)pend);
 }
 
 /*
@@ -406,7 +395,7 @@ map_single(struct device *hwdev, struct swiotlb_phys_addr buffer, size_t size, i
 	struct swiotlb_phys_addr slot_buf;
 
 	mask = dma_get_seg_boundary(hwdev);
-	start_dma_addr = swiotlb_virt_to_bus(io_tlb_start) & mask;
+	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
 
 	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
@@ -585,7 +574,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(ret), size)) {
+	if (ret &&
+	    !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
+				   size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -609,7 +600,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	}
 
 	memset(ret, 0, size);
-	dev_addr = swiotlb_virt_to_bus(ret);
+	dev_addr = swiotlb_virt_to_bus(hwdev, ret);
 
 	/* Confirm address can be DMA'd by device */
 	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
@@ -669,7 +660,7 @@ dma_addr_t
 swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
			 int dir, struct dma_attrs *attrs)
 {
-	dma_addr_t dev_addr = swiotlb_virt_to_bus(ptr);
+	dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, ptr);
 	void *map;
 	struct swiotlb_phys_addr buffer;
 
@@ -694,7 +685,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 		map = io_tlb_overflow_buffer;
 	}
 
-	dev_addr = swiotlb_virt_to_bus(map);
+	dev_addr = swiotlb_virt_to_bus(hwdev, map);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
@@ -840,7 +831,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		dev_addr = swiotlb_sg_to_bus(sg);
+		dev_addr = swiotlb_sg_to_bus(hwdev, sg);
 		if (range_needs_mapping(sg_virt(sg), sg->length) ||
 		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
 			void *map;
@@ -856,7 +847,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 				sgl[0].dma_length = 0;
 				return 0;
 			}
-			sg->dma_address = swiotlb_virt_to_bus(map);
+			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
 		} else
 			sg->dma_address = dev_addr;
 		sg->dma_length = sg->length;
@@ -886,7 +877,7 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		if (sg->dma_address != swiotlb_sg_to_bus(sg))
+		if (sg->dma_address != swiotlb_sg_to_bus(hwdev, sg))
 			unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
				     sg->dma_length, dir);
 		else if (dir == DMA_FROM_DEVICE)
@@ -919,7 +910,7 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		if (sg->dma_address != swiotlb_sg_to_bus(sg))
+		if (sg->dma_address != swiotlb_sg_to_bus(hwdev, sg))
 			sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
				    sg->dma_length, dir, target);
 		else if (dir == DMA_FROM_DEVICE)
@@ -944,7 +935,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
-	return (dma_addr == swiotlb_virt_to_bus(io_tlb_overflow_buffer));
+	return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
 }
 
 /*
@@ -956,7 +947,7 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return swiotlb_virt_to_bus(io_tlb_end - 1) <= mask;
+	return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
 }
 
 EXPORT_SYMBOL(swiotlb_map_single);