author		Alexander Duyck <alexander.h.duyck@intel.com>	2012-10-15 13:19:39 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2012-10-30 09:32:06 -0400
commit		e05ed4d1fad9e730995abb08cb9bc3bffac5018b (patch)
tree		72a2662e8b85308bad057c904a5231d890d7fbba /lib/swiotlb.c
parent		ee3f6ba896c7e62004b677b0018a0b29b9b26472 (diff)
swiotlb: Return physical addresses when calling swiotlb_tbl_map_single
This change makes it so that swiotlb_tbl_map_single will return a physical address instead of a virtual address when called. The advantage to this, once again, is that we avoid a number of virt_to_phys and phys_to_virt translations by working with everything as a physical address.

One change I had to make in order to support using physical addresses is that I could no longer trust 0 to be an invalid physical address on all platforms. So instead I made it so that ~0 is returned on error. This should never be a valid return value, as it would imply that only one byte was available for use.

In order to clarify things, since we now have two physical addresses in use inside of swiotlb_tbl_map_single, I am renaming phys to orig_addr and dma_addr to tlb_addr. This way it should be clear that orig_addr is the address recorded in the io_tlb_orig_addr array and that tlb_addr is an address within the io_tlb bounce buffer.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
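For illustration, here is a minimal caller-side sketch of the new convention (not part of this patch); the helper name example_bounce_map() is hypothetical, and SWIOTLB_MAP_ERROR is assumed to be the ~0 sentinel described above, defined outside lib/swiotlb.c:

	/* Illustrative sketch only -- not part of this commit. */
	#include <linux/swiotlb.h>
	#include <linux/dma-mapping.h>

	static dma_addr_t example_bounce_map(struct device *hwdev,
					     dma_addr_t tbl_dma_addr,
					     phys_addr_t orig_addr, size_t size,
					     enum dma_data_direction dir)
	{
		/* swiotlb_tbl_map_single() now returns a physical address inside
		 * the bounce buffer; ~0 (SWIOTLB_MAP_ERROR) signals that no slots
		 * were available, since 0 may be a valid physical address. */
		phys_addr_t tlb_addr = swiotlb_tbl_map_single(hwdev, tbl_dma_addr,
							      orig_addr, size, dir);

		if (tlb_addr == SWIOTLB_MAP_ERROR)
			return 0;	/* caller-specific error handling goes here */

		/* Only a phys-to-bus translation is needed; there is no longer a
		 * virt_to_phys()/phys_to_virt() round trip on the fast path. */
		return phys_to_dma(hwdev, tlb_addr);
	}

The real callers in the diff below (swiotlb_map_page() and swiotlb_map_sg_attrs()) follow this same pattern, translating the returned physical address with phys_to_dma() instead of swiotlb_virt_to_bus().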
Diffstat (limited to 'lib/swiotlb.c')
-rw-r--r--  lib/swiotlb.c  78
1 file changed, 40 insertions(+), 38 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index f8c0d4e1d1d3..3adc148bb8d8 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -393,12 +393,13 @@ void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
 }
 EXPORT_SYMBOL_GPL(swiotlb_bounce);
 
-void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
-			     phys_addr_t phys, size_t size,
-			     enum dma_data_direction dir)
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
+				   dma_addr_t tbl_dma_addr,
+				   phys_addr_t orig_addr, size_t size,
+				   enum dma_data_direction dir)
 {
 	unsigned long flags;
-	char *dma_addr;
+	phys_addr_t tlb_addr;
 	unsigned int nslots, stride, index, wrap;
 	int i;
 	unsigned long mask;
@@ -462,7 +463,7 @@ void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
 		io_tlb_list[i] = 0;
 	for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
 		io_tlb_list[i] = ++count;
-	dma_addr = (char *)phys_to_virt(io_tlb_start) + (index << IO_TLB_SHIFT);
+	tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
 
 	/*
 	 * Update the indices to avoid searching in the next
@@ -480,7 +481,7 @@ void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
 
 not_found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
-	return NULL;
+	return SWIOTLB_MAP_ERROR;
 found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 
@@ -490,11 +491,12 @@ found:
 	 * needed.
 	 */
 	for (i = 0; i < nslots; i++)
-		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
+		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-		swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
+		swiotlb_bounce(orig_addr, phys_to_virt(tlb_addr), size,
+			       DMA_TO_DEVICE);
 
-	return dma_addr;
+	return tlb_addr;
 }
 EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
 
@@ -502,9 +504,8 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 
-static void *
-map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-	   enum dma_data_direction dir)
+phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+		       enum dma_data_direction dir)
 {
 	dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
 
@@ -598,12 +599,15 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
-		/*
-		 * The allocated memory isn't reachable by the device.
-		 */
-		free_pages((unsigned long) ret, order);
-		ret = NULL;
+	if (ret) {
+		dev_addr = swiotlb_virt_to_bus(hwdev, ret);
+		if (dev_addr + size - 1 > dma_mask) {
+			/*
+			 * The allocated memory isn't reachable by the device.
+			 */
+			free_pages((unsigned long) ret, order);
+			ret = NULL;
+		}
 	}
 	if (!ret) {
 		/*
@@ -611,13 +615,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		 * GFP_DMA memory; fall back on map_single(), which
 		 * will grab memory from the lowest available address range.
 		 */
-		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
-		if (!ret)
+		phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
+		if (paddr == SWIOTLB_MAP_ERROR)
 			return NULL;
-	}
 
-	memset(ret, 0, size);
-	dev_addr = swiotlb_virt_to_bus(hwdev, ret);
+		ret = phys_to_virt(paddr);
+		dev_addr = phys_to_dma(hwdev, paddr);
+	}
 
 	/* Confirm address can be DMA'd by device */
 	if (dev_addr + size - 1 > dma_mask) {
@@ -629,7 +633,10 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
 		return NULL;
 	}
+
 	*dma_handle = dev_addr;
+	memset(ret, 0, size);
+
 	return ret;
 }
 EXPORT_SYMBOL(swiotlb_alloc_coherent);
@@ -686,9 +693,8 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 			    enum dma_data_direction dir,
 			    struct dma_attrs *attrs)
 {
-	phys_addr_t phys = page_to_phys(page) + offset;
+	phys_addr_t map, phys = page_to_phys(page) + offset;
 	dma_addr_t dev_addr = phys_to_dma(dev, phys);
-	void *map;
 
 	BUG_ON(dir == DMA_NONE);
 	/*
@@ -699,22 +705,18 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
 		return dev_addr;
 
-	/*
-	 * Oh well, have to allocate and map a bounce buffer.
-	 */
+	/* Oh well, have to allocate and map a bounce buffer. */
 	map = map_single(dev, phys, size, dir);
-	if (!map) {
+	if (map == SWIOTLB_MAP_ERROR) {
 		swiotlb_full(dev, size, dir, 1);
 		return phys_to_dma(dev, io_tlb_overflow_buffer);
 	}
 
-	dev_addr = swiotlb_virt_to_bus(dev, map);
+	dev_addr = phys_to_dma(dev, map);
 
-	/*
-	 * Ensure that the address returned is DMA'ble
-	 */
+	/* Ensure that the address returned is DMA'ble */
 	if (!dma_capable(dev, dev_addr, size)) {
-		swiotlb_tbl_unmap_single(dev, map, size, dir);
+		swiotlb_tbl_unmap_single(dev, phys_to_virt(map), size, dir);
 		return phys_to_dma(dev, io_tlb_overflow_buffer);
 	}
 
@@ -840,9 +842,9 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 
 		if (swiotlb_force ||
 		    !dma_capable(hwdev, dev_addr, sg->length)) {
-			void *map = map_single(hwdev, sg_phys(sg),
-					       sg->length, dir);
-			if (!map) {
+			phys_addr_t map = map_single(hwdev, sg_phys(sg),
+						     sg->length, dir);
+			if (map == SWIOTLB_MAP_ERROR) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
 				swiotlb_full(hwdev, sg->length, dir, 0);
@@ -851,7 +853,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 				sgl[0].dma_length = 0;
 				return 0;
 			}
-			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
+			sg->dma_address = phys_to_dma(hwdev, map);
 		} else
 			sg->dma_address = dev_addr;
 		sg->dma_length = sg->length;