diff options
Diffstat (limited to 'drivers/xen/swiotlb-xen.c')
-rw-r--r--  drivers/xen/swiotlb-xen.c  19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index ebd8f218a788..810ad419e34c 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -96,8 +96,6 @@ static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
96 | dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT; | 96 | dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT; |
97 | phys_addr_t paddr = dma; | 97 | phys_addr_t paddr = dma; |
98 | 98 | ||
99 | BUG_ON(paddr != dma); /* truncation has occurred, should never happen */ | ||
100 | |||
101 | paddr |= baddr & ~PAGE_MASK; | 99 | paddr |= baddr & ~PAGE_MASK; |
102 | 100 | ||
103 | return paddr; | 101 | return paddr; |
@@ -399,11 +397,13 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
399 | * buffering it. | 397 | * buffering it. |
400 | */ | 398 | */ |
401 | if (dma_capable(dev, dev_addr, size) && | 399 | if (dma_capable(dev, dev_addr, size) && |
402 | !range_straddles_page_boundary(phys, size) && !swiotlb_force) { | 400 | !range_straddles_page_boundary(phys, size) && |
401 | !xen_arch_need_swiotlb(dev, PFN_DOWN(phys), PFN_DOWN(dev_addr)) && | ||
402 | !swiotlb_force) { | ||
403 | /* we are not interested in the dma_addr returned by | 403 | /* we are not interested in the dma_addr returned by |
404 | * xen_dma_map_page, only in the potential cache flushes executed | 404 | * xen_dma_map_page, only in the potential cache flushes executed |
405 | * by the function. */ | 405 | * by the function. */ |
406 | xen_dma_map_page(dev, page, offset, size, dir, attrs); | 406 | xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs); |
407 | return dev_addr; | 407 | return dev_addr; |
408 | } | 408 | } |
409 | 409 | ||
@@ -417,7 +417,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
417 | return DMA_ERROR_CODE; | 417 | return DMA_ERROR_CODE; |
418 | 418 | ||
419 | xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT), | 419 | xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT), |
420 | map & ~PAGE_MASK, size, dir, attrs); | 420 | dev_addr, map & ~PAGE_MASK, size, dir, attrs); |
421 | dev_addr = xen_phys_to_bus(map); | 421 | dev_addr = xen_phys_to_bus(map); |
422 | 422 | ||
423 | /* | 423 | /* |
@@ -447,7 +447,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
447 | 447 | ||
448 | BUG_ON(dir == DMA_NONE); | 448 | BUG_ON(dir == DMA_NONE); |
449 | 449 | ||
450 | xen_dma_unmap_page(hwdev, paddr, size, dir, attrs); | 450 | xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs); |
451 | 451 | ||
452 | /* NOTE: We use dev_addr here, not paddr! */ | 452 | /* NOTE: We use dev_addr here, not paddr! */ |
453 | if (is_xen_swiotlb_buffer(dev_addr)) { | 453 | if (is_xen_swiotlb_buffer(dev_addr)) { |
@@ -495,14 +495,14 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
495 | BUG_ON(dir == DMA_NONE); | 495 | BUG_ON(dir == DMA_NONE); |
496 | 496 | ||
497 | if (target == SYNC_FOR_CPU) | 497 | if (target == SYNC_FOR_CPU) |
498 | xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir); | 498 | xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir); |
499 | 499 | ||
500 | /* NOTE: We use dev_addr here, not paddr! */ | 500 | /* NOTE: We use dev_addr here, not paddr! */ |
501 | if (is_xen_swiotlb_buffer(dev_addr)) | 501 | if (is_xen_swiotlb_buffer(dev_addr)) |
502 | swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target); | 502 | swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target); |
503 | 503 | ||
504 | if (target == SYNC_FOR_DEVICE) | 504 | if (target == SYNC_FOR_DEVICE) |
505 | xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir); | 505 | xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir); |
506 | 506 | ||
507 | if (dir != DMA_FROM_DEVICE) | 507 | if (dir != DMA_FROM_DEVICE) |
508 | return; | 508 | return; |
@@ -557,6 +557,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
557 | dma_addr_t dev_addr = xen_phys_to_bus(paddr); | 557 | dma_addr_t dev_addr = xen_phys_to_bus(paddr); |
558 | 558 | ||
559 | if (swiotlb_force || | 559 | if (swiotlb_force || |
560 | xen_arch_need_swiotlb(hwdev, PFN_DOWN(paddr), PFN_DOWN(dev_addr)) || | ||
560 | !dma_capable(hwdev, dev_addr, sg->length) || | 561 | !dma_capable(hwdev, dev_addr, sg->length) || |
561 | range_straddles_page_boundary(paddr, sg->length)) { | 562 | range_straddles_page_boundary(paddr, sg->length)) { |
562 | phys_addr_t map = swiotlb_tbl_map_single(hwdev, | 563 | phys_addr_t map = swiotlb_tbl_map_single(hwdev, |
@@ -574,6 +575,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
574 | return 0; | 575 | return 0; |
575 | } | 576 | } |
576 | xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT), | 577 | xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT), |
578 | dev_addr, | ||
577 | map & ~PAGE_MASK, | 579 | map & ~PAGE_MASK, |
578 | sg->length, | 580 | sg->length, |
579 | dir, | 581 | dir, |
@@ -584,6 +586,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
584 | * xen_dma_map_page, only in the potential cache flushes executed | 586 | * xen_dma_map_page, only in the potential cache flushes executed |
585 | * by the function. */ | 587 | * by the function. */ |
586 | xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT), | 588 | xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT), |
589 | dev_addr, | ||
587 | paddr & ~PAGE_MASK, | 590 | paddr & ~PAGE_MASK, |
588 | sg->length, | 591 | sg->length, |
589 | dir, | 592 | dir, |