summaryrefslogtreecommitdiffstats
path: root/drivers/xen
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2016-11-02 07:12:47 -0400
committerKonrad Rzeszutek Wilk <konrad@kernel.org>2016-11-07 15:06:32 -0500
commit7641842164c34b672ef3e70e881e8a72735305c1 (patch)
tree3cc9ce655ba49870ff070225b231f1d2074566b3 /drivers/xen
parentebcf6f979d55f35dfe36956364f0dce8c738220b (diff)
swiotlb-xen: Enforce return of DMA_ERROR_CODE in mapping function
The mapping function should always return DMA_ERROR_CODE when a mapping has failed as this is what the DMA API expects when a DMA error has occurred. The current function for mapping a page in Xen was returning either DMA_ERROR_CODE or 0 depending on where it failed. On x86 DMA_ERROR_CODE is 0, but on other architectures such as ARM it is ~0. We need to make sure we return the same error value if either the mapping failed or the device is not capable of accessing the mapping. If we are returning DMA_ERROR_CODE as our error value we can drop the function for checking the error code as the default is to compare the return value against DMA_ERROR_CODE if no function is defined.

Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad@kernel.org>
Diffstat (limited to 'drivers/xen')
-rw-r--r--  drivers/xen/swiotlb-xen.c | 18 +++++-------------
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 87e6035c9e81..b8014bf2b2ed 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -416,11 +416,12 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (!dma_capable(dev, dev_addr, size)) {
-		swiotlb_tbl_unmap_single(dev, map, size, dir);
-		dev_addr = 0;
-	}
-	return dev_addr;
+	if (dma_capable(dev, dev_addr, size))
+		return dev_addr;
+
+	swiotlb_tbl_unmap_single(dev, map, size, dir);
+
+	return DMA_ERROR_CODE;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
 
@@ -648,13 +649,6 @@ xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
 
-int
-xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
-	return !dma_addr;
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);
-
 /*
  * Return whether the given device DMA address mask can be supported
  * properly. For example, if your device can only drive the low 24-bits