author	Christoph Hellwig <hch@lst.de>	2019-04-11 03:20:00 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2019-05-02 19:52:06 -0400
commit	063b8271ec8f706d833e61dfca40c512504a62c1 (patch)
tree	b1ae9fd85840c7c1a5bf4514e984ea6ebb2574dd /drivers/xen
parent	2e12dceef3d3fb8d796e384aa242062d1643cad4 (diff)
swiotlb-xen: ensure we have a single callsite for xen_dma_map_page
Refactor the code a bit to make further changes easier.

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
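For context, a minimal standalone sketch (not the kernel code) of the control-flow shape this patch gives xen_swiotlb_map_page(): the fast path jumps to a shared done: label instead of calling the mapping helper itself, so the helper has exactly one callsite. The names below (map_page, do_map, need_bounce) are hypothetical illustrations of the pattern, not kernel APIs.

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for xen_dma_map_page(): the helper we want to call from one place. */
static void do_map(int addr)
{
	printf("mapping address %d\n", addr);
}

/* Hypothetical mapper with a fast path and a bounce-buffer path. */
static int map_page(int addr, bool need_bounce)
{
	if (!need_bounce)
		goto done;		/* fast path: skip bounce-buffer setup */

	addr += 1000;			/* stand-in for allocating a bounce buffer */
done:
	do_map(addr);			/* the single callsite shared by both paths */
	return addr;
}

int main(void)
{
	printf("direct:  %d\n", map_page(42, false));
	printf("bounced: %d\n", map_page(42, true));
	return 0;
}

Before the patch, xen_dma_map_page() was called once on the fast path and once after bouncing; consolidating both onto the done: label means later changes to the mapping step touch a single spot.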
Diffstat (limited to 'drivers/xen')
-rw-r--r--	drivers/xen/swiotlb-xen.c	31
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index ba558094d0e6..64cb94dfedd4 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -388,13 +388,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	if (dma_capable(dev, dev_addr, size) &&
 	    !range_straddles_page_boundary(phys, size) &&
 	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
-	    (swiotlb_force != SWIOTLB_FORCE)) {
-		/* we are not interested in the dma_addr returned by
-		 * xen_dma_map_page, only in the potential cache flushes executed
-		 * by the function. */
-		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
-		return dev_addr;
-	}
+	    swiotlb_force != SWIOTLB_FORCE)
+		goto done;
 
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
@@ -407,19 +402,25 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 		return DMA_MAPPING_ERROR;
 
 	dev_addr = xen_phys_to_bus(map);
-	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
-					dev_addr, map & ~PAGE_MASK, size, dir, attrs);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (dma_capable(dev, dev_addr, size))
-		return dev_addr;
-
-	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
-
-	return DMA_MAPPING_ERROR;
+	if (unlikely(!dma_capable(dev, dev_addr, size))) {
+		swiotlb_tbl_unmap_single(dev, map, size, dir,
+				attrs | DMA_ATTR_SKIP_CPU_SYNC);
+		return DMA_MAPPING_ERROR;
+	}
+
+	page = pfn_to_page(map >> PAGE_SHIFT);
+	offset = map & ~PAGE_MASK;
+done:
+	/*
+	 * we are not interested in the dma_addr returned by xen_dma_map_page,
+	 * only in the potential cache flushes executed by the function.
+	 */
+	xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
+	return dev_addr;
 }
 
 /*