 lib/swiotlb.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index b5f5d1133042..240a67c2c979 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -274,13 +274,13 @@ cleanup1:
 }
 
 static int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
 {
 	dma_addr_t mask = 0xffffffff;
 	/* If the device has a mask, use it, otherwise default to 32 bits */
 	if (hwdev && hwdev->dma_mask)
 		mask = *hwdev->dma_mask;
-	return (addr & ~mask) != 0;
+	return !is_buffer_dma_capable(mask, addr, size);
 }
 
 static int is_swiotlb_buffer(char *addr)
@@ -473,7 +473,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	int order = get_order(size);
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
+	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -497,7 +497,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dev_addr = virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (address_needs_mapping(hwdev, dev_addr)) {
+	if (address_needs_mapping(hwdev, dev_addr, size)) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
 		       (unsigned long long)*hwdev->dma_mask,
 		       (unsigned long long)dev_addr);
@@ -561,7 +561,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+	if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
 		return dev_addr;
 
 	/*
@@ -578,7 +578,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (address_needs_mapping(hwdev, dev_addr))
+	if (address_needs_mapping(hwdev, dev_addr, size))
 		panic("map_single: bounce buffer is not DMA'ble");
 
 	return dev_addr;
@@ -721,7 +721,8 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 	for_each_sg(sgl, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		dev_addr = virt_to_bus(addr);
-		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+		if (swiotlb_force ||
+		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
 			void *map = map_single(hwdev, addr, sg->length, dir);
 			if (!map) {
 				/* Don't panic here, we expect map_sg users
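
For context: the point of this patch is that the old test, (addr & ~mask) != 0,
only checked the buffer's start address against the device's DMA mask, so a
buffer whose tail crosses the mask boundary could wrongly pass. Passing a size
lets is_buffer_dma_capable() check the whole buffer. A minimal sketch of that
helper, reconstructed here for illustration from the contemporaneous
include/linux/dma-mapping.h (not part of this patch; treat the exact body as
an assumption):

/* A buffer is DMA-capable for a given mask only if the whole range
 * [addr, addr + size) lies at or below the mask, not just its first byte.
 */
static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr,
					size_t size)
{
	return addr + size <= mask;
}

This is also why each call site now supplies a length: swiotlb_alloc_coherent()
and swiotlb_map_single_attrs() pass the mapping size, while
swiotlb_map_sg_attrs() passes each scatterlist entry's sg->length.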