author    Lu Baolu <baolu.lu@linux.intel.com>  2019-09-06 02:14:48 -0400
committer Joerg Roedel <jroedel@suse.de>       2019-09-11 06:34:29 -0400
commit    3fc1ca00653db6371585e3c21c4b873b2f20e60a
tree      0c6b5431d51319a078b036284c81ed12b7f55b54
parent    2c70010867f164d1b30e787e360e05d10cc40046
swiotlb: Split size parameter to map/unmap APIs
This splits the size parameter to swiotlb_tbl_map_single() and
swiotlb_tbl_unmap_single() into an alloc_size and a mapping_size
parameter, where the latter one is rounded up to the iommu page size.

Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
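For context, here is a minimal sketch of how a caller might use the split
parameters, assuming an IOMMU bounce path that pads the swiotlb allocation
to the IOMMU page size while copying only the bytes the device will touch.
The helper bounce_map() is hypothetical and not part of this patch;
VTD_PAGE_SIZE, __phys_to_dma() and io_tlb_start are existing kernel
symbols of this era.

/*
 * Illustrative caller (not part of this patch): bounce a buffer whose
 * swiotlb allocation is padded to the IOMMU page size (alloc_size)
 * while only mapping_size bytes are actually copied.
 */
#include <linux/swiotlb.h>
#include <linux/dma-direct.h>
#include <linux/intel-iommu.h>

static dma_addr_t bounce_map(struct device *dev, phys_addr_t paddr,
                             size_t size, enum dma_data_direction dir,
                             unsigned long attrs)
{
        size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
        phys_addr_t tlb_addr;

        tlb_addr = swiotlb_tbl_map_single(dev,
                                          __phys_to_dma(dev, io_tlb_start),
                                          paddr,
                                          size,         /* mapping_size */
                                          aligned_size, /* alloc_size */
                                          dir, attrs);
        if (tlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
                return DMA_MAPPING_ERROR;

        return __phys_to_dma(dev, tlb_addr);
}

On unmap, the caller would pass the same (mapping_size, alloc_size) pair to
swiotlb_tbl_unmap_single(), so the full padded allocation is released while
only mapping_size bytes are bounced back for DMA_FROM_DEVICE.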
-rw-r--r--  drivers/xen/swiotlb-xen.c |  8
-rw-r--r--  include/linux/swiotlb.h  |  8
-rw-r--r--  kernel/dma/direct.c      |  2
-rw-r--r--  kernel/dma/swiotlb.c     | 34
4 files changed, 32 insertions, 20 deletions
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index ae1df496bf38..adcabd9473eb 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -386,8 +386,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 */
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 
-	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
-				     attrs);
+	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys,
+				     size, size, dir, attrs);
 	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;
 
@@ -397,7 +397,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 * Ensure that the address returned is DMA'ble
 	 */
 	if (unlikely(!dma_capable(dev, dev_addr, size))) {
-		swiotlb_tbl_unmap_single(dev, map, size, dir,
+		swiotlb_tbl_unmap_single(dev, map, size, size, dir,
 				attrs | DMA_ATTR_SKIP_CPU_SYNC);
 		return DMA_MAPPING_ERROR;
 	}
@@ -433,7 +433,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 
 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(dev_addr))
-		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
 }
 
 static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 361f62bb4a8e..cde3dc18e21a 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -46,13 +46,17 @@ enum dma_sync_target {
 
 extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 					  dma_addr_t tbl_dma_addr,
-					  phys_addr_t phys, size_t size,
+					  phys_addr_t phys,
+					  size_t mapping_size,
+					  size_t alloc_size,
 					  enum dma_data_direction dir,
 					  unsigned long attrs);
 
 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
 				     phys_addr_t tlb_addr,
-				     size_t size, enum dma_data_direction dir,
+				     size_t mapping_size,
+				     size_t alloc_size,
+				     enum dma_data_direction dir,
 				     unsigned long attrs);
 
 extern void swiotlb_tbl_sync_single(struct device *hwdev,
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 795c9b095d75..a7f2a0163426 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -297,7 +297,7 @@ void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
 		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
 
 	if (unlikely(is_swiotlb_buffer(phys)))
-		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
+		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
 }
 EXPORT_SYMBOL(dma_direct_unmap_page);
 
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 9de232229063..796a44f8ef5a 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -444,7 +444,9 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 				   dma_addr_t tbl_dma_addr,
-				   phys_addr_t orig_addr, size_t size,
+				   phys_addr_t orig_addr,
+				   size_t mapping_size,
+				   size_t alloc_size,
 				   enum dma_data_direction dir,
 				   unsigned long attrs)
 {
@@ -464,6 +466,12 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 		pr_warn_once("%s is active and system is using DMA bounce buffers\n",
 			     sme_active() ? "SME" : "SEV");
 
+	if (mapping_size > alloc_size) {
+		dev_warn_once(hwdev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
+			      mapping_size, alloc_size);
+		return (phys_addr_t)DMA_MAPPING_ERROR;
+	}
+
 	mask = dma_get_seg_boundary(hwdev);
 
 	tbl_dma_addr &= mask;
@@ -471,8 +479,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
 	/*
 	 * Carefully handle integer overflow which can occur when mask == ~0UL.
 	 */
 	max_slots = mask + 1
 		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
 		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
@@ -481,8 +489,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	 * For mappings greater than or equal to a page, we limit the stride
 	 * (and hence alignment) to a page size.
 	 */
-	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-	if (size >= PAGE_SIZE)
+	nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	if (alloc_size >= PAGE_SIZE)
 		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
 	else
 		stride = 1;
@@ -547,7 +555,7 @@ not_found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 	if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
 		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
-			 size, io_tlb_nslabs, tmp_io_tlb_used);
+			 alloc_size, io_tlb_nslabs, tmp_io_tlb_used);
 	return (phys_addr_t)DMA_MAPPING_ERROR;
 found:
 	io_tlb_used += nslots;
@@ -562,7 +570,7 @@ found:
 		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
+		swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
 
 	return tlb_addr;
 }
@@ -571,11 +579,11 @@ found:
  * tlb_addr is the physical address of the bounce buffer to unmap.
  */
 void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
-			      size_t size, enum dma_data_direction dir,
-			      unsigned long attrs)
+			      size_t mapping_size, size_t alloc_size,
+			      enum dma_data_direction dir, unsigned long attrs)
 {
 	unsigned long flags;
-	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	int i, count, nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
@@ -585,7 +593,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 	if (orig_addr != INVALID_PHYS_ADDR &&
 	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
-		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
+		swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_FROM_DEVICE);
 
 	/*
 	 * Return the buffer to the free list by setting the corresponding
@@ -665,14 +673,14 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
 
 	/* Oh well, have to allocate and map a bounce buffer. */
 	*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
-			*phys, size, dir, attrs);
+			*phys, size, size, dir, attrs);
 	if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
 		return false;
 
 	/* Ensure that the address returned is DMA'ble */
 	*dma_addr = __phys_to_dma(dev, *phys);
 	if (unlikely(!dma_capable(dev, *dma_addr, size))) {
-		swiotlb_tbl_unmap_single(dev, *phys, size, dir,
+		swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
 				attrs | DMA_ATTR_SKIP_CPU_SYNC);
 		return false;
 	}