 drivers/xen/swiotlb-xen.c | 31 ++++++++++++++++---------------
 1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index ba558094d0e6..64cb94dfedd4 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -388,13 +388,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	if (dma_capable(dev, dev_addr, size) &&
 	    !range_straddles_page_boundary(phys, size) &&
 	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
-	    (swiotlb_force != SWIOTLB_FORCE)) {
-		/* we are not interested in the dma_addr returned by
-		 * xen_dma_map_page, only in the potential cache flushes executed
-		 * by the function. */
-		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
-		return dev_addr;
-	}
+	    swiotlb_force != SWIOTLB_FORCE)
+		goto done;
 
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
@@ -407,19 +402,25 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 		return DMA_MAPPING_ERROR;
 
 	dev_addr = xen_phys_to_bus(map);
-	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
-			dev_addr, map & ~PAGE_MASK, size, dir, attrs);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (dma_capable(dev, dev_addr, size))
-		return dev_addr;
-
-	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+	if (unlikely(!dma_capable(dev, dev_addr, size))) {
+		swiotlb_tbl_unmap_single(dev, map, size, dir,
+				attrs | DMA_ATTR_SKIP_CPU_SYNC);
+		return DMA_MAPPING_ERROR;
+	}
 
-	return DMA_MAPPING_ERROR;
+	page = pfn_to_page(map >> PAGE_SHIFT);
+	offset = map & ~PAGE_MASK;
+done:
+	/*
+	 * we are not interested in the dma_addr returned by xen_dma_map_page,
+	 * only in the potential cache flushes executed by the function.
+	 */
+	xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
+	return dev_addr;
 }
 
 /*
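For orientation, here is a condensed sketch of how xen_swiotlb_map_page() reads with this patch applied: the fast path jumps to the shared done: label instead of duplicating the xen_dma_map_page() call, and the bounce-buffer error path unmaps with DMA_ATTR_SKIP_CPU_SYNC folded into attrs before returning DMA_MAPPING_ERROR. The local variable setup (phys, dev_addr, start_dma_addr) and the swiotlb_tbl_map_single() call site are paraphrased from the unchanged parts of the file rather than taken from the hunks above, so read those details as assumptions, not the exact upstream source:

static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);
	/* start of the bounce pool; taken from the unchanged file (assumed) */
	dma_addr_t start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);

	/*
	 * Fast path: the device can reach the buffer directly, it does not
	 * straddle a page boundary, and swiotlb is not being forced.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    swiotlb_force != SWIOTLB_FORCE)
		goto done;

	/* Slow path: bounce the buffer through the swiotlb. */
	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
				     attrs);
	if (map == DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	dev_addr = xen_phys_to_bus(map);

	/* Back the bounce mapping out if the device still cannot reach it. */
	if (unlikely(!dma_capable(dev, dev_addr, size))) {
		swiotlb_tbl_unmap_single(dev, map, size, dir,
					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}

	/* Redirect page/offset to the bounce buffer for the flush below. */
	page = pfn_to_page(map >> PAGE_SHIFT);
	offset = map & ~PAGE_MASK;
done:
	/*
	 * Single exit: only the cache maintenance performed by
	 * xen_dma_map_page() matters here, not its return value.
	 */
	xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
	return dev_addr;
}

The net effect is one exit point that performs the cache maintenance for both the direct and the bounced case, where the old code carried two copies of the xen_dma_map_page() call.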
