Diffstat (limited to 'drivers/xen/swiotlb-xen.c')
 drivers/xen/swiotlb-xen.c | 31 +++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index d8ef0bf577d2..189b8db5c983 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -43,6 +43,7 @@
 #include <xen/xen-ops.h>
 #include <xen/hvc-console.h>
 #include <asm/dma-mapping.h>
+#include <asm/xen/page-coherent.h>
 /*
  * Used to do a quick range check in swiotlb_tbl_unmap_single and
  * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
@@ -142,6 +143,7 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 	int i, rc;
 	int dma_bits;
 	dma_addr_t dma_handle;
+	phys_addr_t p = virt_to_phys(buf);
 
 	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
 
@@ -151,7 +153,7 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 
 	do {
 		rc = xen_create_contiguous_region(
-			(unsigned long)buf + (i << IO_TLB_SHIFT),
+			p + (i << IO_TLB_SHIFT),
 			get_order(slabs << IO_TLB_SHIFT),
 			dma_bits, &dma_handle);
 	} while (rc && dma_bits++ < max_dma_bits);
@@ -279,7 +281,6 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	void *ret;
 	int order = get_order(size);
 	u64 dma_mask = DMA_BIT_MASK(32);
-	unsigned long vstart;
 	phys_addr_t phys;
 	dma_addr_t dev_addr;
 
@@ -294,8 +295,12 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
 		return ret;
 
-	vstart = __get_free_pages(flags, order);
-	ret = (void *)vstart;
+	/* On ARM this function returns an ioremap'ped virtual address for
+	 * which virt_to_phys doesn't return the corresponding physical
+	 * address. In fact on ARM virt_to_phys only works for kernel direct
+	 * mapped RAM memory. Also see comment below.
+	 */
+	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
 
 	if (!ret)
 		return ret;
@@ -303,15 +308,19 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	if (hwdev && hwdev->coherent_dma_mask)
 		dma_mask = dma_alloc_coherent_mask(hwdev, flags);
 
-	phys = virt_to_phys(ret);
+	/* At this point dma_handle is the physical address, next we are
+	 * going to set it to the machine address.
+	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
+	 * to *dma_handle. */
+	phys = *dma_handle;
 	dev_addr = xen_phys_to_bus(phys);
 	if (((dev_addr + size - 1 <= dma_mask)) &&
 	    !range_straddles_page_boundary(phys, size))
 		*dma_handle = dev_addr;
 	else {
-		if (xen_create_contiguous_region(vstart, order,
+		if (xen_create_contiguous_region(phys, order,
 						 fls64(dma_mask), dma_handle) != 0) {
-			free_pages(vstart, order);
+			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
 			return NULL;
 		}
 	}
@@ -334,13 +343,15 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	if (hwdev && hwdev->coherent_dma_mask)
 		dma_mask = hwdev->coherent_dma_mask;
 
-	phys = virt_to_phys(vaddr);
+	/* do not use virt_to_phys because on ARM it doesn't return you the
+	 * physical address */
+	phys = xen_bus_to_phys(dev_addr);
 
 	if (((dev_addr + size - 1 > dma_mask)) ||
 	    range_straddles_page_boundary(phys, size))
-		xen_destroy_contiguous_region((unsigned long)vaddr, order);
+		xen_destroy_contiguous_region(phys, order);
 
-	free_pages((unsigned long)vaddr, order);
+	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
 
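For reference only, here is a rough, hypothetical sketch of what a non-ARM fallback for the two helpers introduced by this patch (xen_alloc_coherent_pages() / xen_free_coherent_pages(), declared in <asm/xen/page-coherent.h>) could look like. It is reconstructed from the __get_free_pages()/free_pages() calls the patch removes and is not necessarily the actual header contents.

/* Hypothetical x86-style fallback, inferred from the calls removed above;
 * the real <asm/xen/page-coherent.h> may differ. */
#include <linux/gfp.h>		/* __get_free_pages, free_pages */
#include <asm/page.h>		/* get_order */
#include <asm/io.h>		/* virt_to_phys */

static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
					     dma_addr_t *dma_handle, gfp_t flags,
					     struct dma_attrs *attrs)
{
	/* Pages come straight from the kernel linear mapping, so
	 * virt_to_phys() is valid here and *dma_handle can be seeded with
	 * the physical address, which is what the caller above now relies
	 * on. */
	void *vstart = (void *)__get_free_pages(flags, get_order(size));

	if (vstart)
		*dma_handle = virt_to_phys(vstart);
	return vstart;
}

static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
					   void *cpu_addr, dma_addr_t dma_handle,
					   struct dma_attrs *attrs)
{
	free_pages((unsigned long)cpu_addr, get_order(size));
}

On ARM, by contrast, the helpers are expected to hand back a remapped (non-linear) virtual address, which is exactly why the swiotlb code above switches from virt_to_phys(ret) to *dma_handle and xen_bus_to_phys().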
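Also for illustration, a driver never calls xen_swiotlb_alloc_coherent() directly; it goes through the generic DMA API, which dispatches to the swiotlb-xen dma_map_ops on a Xen guest. The helper name and buffer size below are made up.

#include <linux/dma-mapping.h>

/* Hypothetical caller: allocates and releases one coherent page through the
 * generic DMA API. */
static int example_coherent_roundtrip(struct device *dev)
{
	dma_addr_t bus_addr;	/* machine (bus) address handed to the device */
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &bus_addr, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program bus_addr into the device, touch the buffer via cpu_addr ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, bus_addr);
	return 0;
}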