author     Stefano Stabellini <stefano.stabellini@eu.citrix.com>  2013-10-25 06:33:25 -0400
committer  Stefano Stabellini <stefano.stabellini@eu.citrix.com>  2013-10-25 06:33:25 -0400
commit     6cf054636261ca5c88f3c2984058d51f927b8a2e (patch)
tree       7d3bad304a71b6d0c855d8caf3adb3d6bc03a6d6
parent     7100b077ab4ff5fb0ba7760ce54465f623a0a763 (diff)
swiotlb-xen: use xen_dma_map/unmap_page, xen_dma_sync_single_for_cpu/device
Call xen_dma_map_page, xen_dma_unmap_page, xen_dma_sync_single_for_cpu,
xen_dma_sync_single_for_device from swiotlb-xen to ensure cpu/device coherency
of the pages used for DMA, including the ones belonging to the swiotlb buffer.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
-rw-r--r--  drivers/xen/swiotlb-xen.c  39
1 file changed, 31 insertions(+), 8 deletions(-)
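The helpers named in the commit message (xen_dma_map_page and friends) are provided per architecture; swiotlb-xen only calls them so that whatever CPU cache maintenance the platform requires actually happens, both for pages mapped directly and for the swiotlb bounce buffer. The snippet below is a minimal sketch, not part of this patch, of how an architecture with non-coherent DMA might back these hooks by forwarding to its native struct dma_map_ops; the __generic_dma_ops() accessor used here is an assumption for illustration, and on a cache-coherent architecture such as x86 the hooks can simply be empty stubs.

    /* Hypothetical arch-side backing for the xen_dma_* hooks (sketch only). */
    #include <linux/dma-mapping.h>
    #include <linux/dma-attrs.h>

    static inline dma_addr_t xen_dma_map_page(struct device *hwdev,
                    struct page *page, unsigned long offset, size_t size,
                    enum dma_data_direction dir, struct dma_attrs *attrs)
    {
            /* cache maintenance needed before the device accesses the page */
            return __generic_dma_ops(hwdev)->map_page(hwdev, page, offset,
                                                      size, dir, attrs);
    }

    static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                    size_t size, enum dma_data_direction dir,
                    struct dma_attrs *attrs)
    {
            /* cache maintenance needed before the CPU reads the buffer back */
            __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
    }

    static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
                    dma_addr_t handle, size_t size, enum dma_data_direction dir)
    {
            __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
    }

    static inline void xen_dma_sync_single_for_device(struct device *hwdev,
                    dma_addr_t handle, size_t size, enum dma_data_direction dir)
    {
            __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
    }

With hooks along these lines in place, the hunks below also cover the bounce-buffer case: the swiotlb slot returned by swiotlb_tbl_map_single is itself passed through xen_dma_map_page before its bus address is handed to the device.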
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 189b8db5c983..4221cb52387d 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -378,8 +378,13 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size) &&
-           !range_straddles_page_boundary(phys, size) && !swiotlb_force)
+           !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
+               /* we are not interested in the dma_addr returned by
+                * xen_dma_map_page, only in the potential cache flushes executed
+                * by the function. */
+               xen_dma_map_page(dev, page, offset, size, dir, attrs);
                return dev_addr;
+       }
 
        /*
         * Oh well, have to allocate and map a bounce buffer.
@@ -388,6 +393,8 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
        if (map == SWIOTLB_MAP_ERROR)
                return DMA_ERROR_CODE;
 
+       xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
+                               map & ~PAGE_MASK, size, dir, attrs);
        dev_addr = xen_phys_to_bus(map);
 
        /*
@@ -410,12 +417,15 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
  * whatever the device wrote there.
  */
 static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-                            size_t size, enum dma_data_direction dir)
+                            size_t size, enum dma_data_direction dir,
+                            struct dma_attrs *attrs)
 {
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);
 
        BUG_ON(dir == DMA_NONE);
 
+       xen_dma_unmap_page(hwdev, paddr, size, dir, attrs);
+
        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
                swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
@@ -438,7 +448,7 @@ void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir,
                             struct dma_attrs *attrs)
 {
-       xen_unmap_single(hwdev, dev_addr, size, dir);
+       xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);
 
@@ -461,11 +471,15 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 
        BUG_ON(dir == DMA_NONE);
 
+       if (target == SYNC_FOR_CPU)
+               xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
+
        /* NOTE: We use dev_addr here, not paddr! */
-       if (is_xen_swiotlb_buffer(dev_addr)) {
+       if (is_xen_swiotlb_buffer(dev_addr))
                swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
-               return;
-       }
+
+       if (target == SYNC_FOR_DEVICE)
+               xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
 
        if (dir != DMA_FROM_DEVICE)
                return;
@@ -536,8 +550,17 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                                return DMA_ERROR_CODE;
                        }
                        sg->dma_address = xen_phys_to_bus(map);
-               } else
+               } else {
+                       /* we are not interested in the dma_addr returned by
+                        * xen_dma_map_page, only in the potential cache flushes executed
+                        * by the function. */
+                       xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
+                                               paddr & ~PAGE_MASK,
+                                               sg->length,
+                                               dir,
+                                               attrs);
                        sg->dma_address = dev_addr;
+               }
                sg_dma_len(sg) = sg->length;
        }
        return nelems;
@@ -559,7 +582,7 @@ xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
        BUG_ON(dir == DMA_NONE);
 
        for_each_sg(sgl, sg, nelems, i)
-               xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
+               xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
 
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);