summaryrefslogtreecommitdiffstats
path: root/drivers/xen/swiotlb-xen.c
diff options
context:
space:
mode:
authorStefano Stabellini <sstabellini@kernel.org>2017-01-19 13:39:09 -0500
committerKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>2017-01-20 08:46:37 -0500
commitf1225ee4c8fcf09afaa199b8b1f0450f38b8cd11 (patch)
tree11f43863f5cc53bb54d1dba9c8254998c47840e7 /drivers/xen/swiotlb-xen.c
parent602d9858f07c72eab64f5f00e2fae55f9902cfbe (diff)
swiotlb-xen: update dev_addr after swapping pages
In xen_swiotlb_map_page and xen_swiotlb_map_sg_attrs, if the original page is not suitable, we swap it for another page from the swiotlb pool. In these cases, we don't update the previously calculated dma address for the page before calling xen_dma_map_page. Thus, we end up calling xen_dma_map_page passing the wrong dev_addr, resulting in xen_dma_map_page mistakenly assuming that the page is foreign when it is local. Fix the bug by updating dev_addr appropriately. This change has no effect on x86, because xen_dma_map_page is a stub there. Signed-off-by: Stefano Stabellini <sstabellini@kernel.org> Signed-off-by: Pooya Keshavarzi <Pooya.Keshavarzi@de.bosch.com> Tested-by: Pooya Keshavarzi <Pooya.Keshavarzi@de.bosch.com> Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'drivers/xen/swiotlb-xen.c')
-rw-r--r--drivers/xen/swiotlb-xen.c5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index f905d6eeb048..f8afc6dcc29f 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -414,9 +414,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
414 if (map == SWIOTLB_MAP_ERROR) 414 if (map == SWIOTLB_MAP_ERROR)
415 return DMA_ERROR_CODE; 415 return DMA_ERROR_CODE;
416 416
417 dev_addr = xen_phys_to_bus(map);
417 xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT), 418 xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
418 dev_addr, map & ~PAGE_MASK, size, dir, attrs); 419 dev_addr, map & ~PAGE_MASK, size, dir, attrs);
419 dev_addr = xen_phys_to_bus(map);
420 420
421 /* 421 /*
422 * Ensure that the address returned is DMA'ble 422 * Ensure that the address returned is DMA'ble
@@ -575,13 +575,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
575 sg_dma_len(sgl) = 0; 575 sg_dma_len(sgl) = 0;
576 return 0; 576 return 0;
577 } 577 }
578 dev_addr = xen_phys_to_bus(map);
578 xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT), 579 xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
579 dev_addr, 580 dev_addr,
580 map & ~PAGE_MASK, 581 map & ~PAGE_MASK,
581 sg->length, 582 sg->length,
582 dir, 583 dir,
583 attrs); 584 attrs);
584 sg->dma_address = xen_phys_to_bus(map); 585 sg->dma_address = dev_addr;
585 } else { 586 } else {
586 /* we are not interested in the dma_addr returned by 587 /* we are not interested in the dma_addr returned by
587 * xen_dma_map_page, only in the potential cache flushes executed 588 * xen_dma_map_page, only in the potential cache flushes executed