Diffstat (limited to 'lib')
-rw-r--r--  lib/swiotlb.c  33
1 file changed, 17 insertions, 16 deletions
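This commit switches lib/swiotlb.c from virt_to_phys()/phys_to_virt() to virt_to_bus()/bus_to_virt(): every address the swiotlb hands to hardware, or receives back from a driver, is a device-visible bus address, which on some platforms differs from the CPU physical address. As a rough sketch of the distinction (illustrative only, not part of the patch; the example_* helpers are made up):

	#include <linux/types.h>
	#include <asm/io.h>		/* virt_to_bus(), bus_to_virt() */

	/*
	 * On many machines virt_to_bus(v) == virt_to_phys(v), but an
	 * architecture may apply a fixed offset between physical and
	 * bus addresses, so swiotlb must use the bus-address helpers
	 * consistently for anything a device will see.
	 */
	static dma_addr_t example_device_address(void *cpu_addr)
	{
		return virt_to_bus(cpu_addr);	/* address the device DMAs to */
	}

	static void *example_cpu_address(dma_addr_t bus_addr)
	{
		return bus_to_virt(bus_addr);	/* back to a kernel pointer */
	}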
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 34278338aad0..bc684d1fd426 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -36,7 +36,7 @@
 	                   ( (val) & ( (align) - 1)))
 
 #define SG_ENT_VIRT_ADDRESS(sg)	(page_address((sg)->page) + (sg)->offset)
-#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
+#define SG_ENT_PHYS_ADDRESS(sg)	virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
 
 /*
  * Maximum allowable number of contiguous slabs to map,
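Besides the conversion, this hunk fixes the mismatched parameter case (SG vs. sg) in the macro. For context, the scatterlist paths later in the file use the macro pair roughly like this (illustrative recap of the loop in swiotlb_map_sg(); sgl and nelems are assumed, with the 2.6-era struct scatterlist):

	struct scatterlist *sg;
	int i;

	for (i = 0, sg = sgl; i < nelems; i++, sg++) {
		void *addr = SG_ENT_VIRT_ADDRESS(sg);		/* kernel virtual */
		dma_addr_t dev_addr = SG_ENT_PHYS_ADDRESS(sg);	/* now a bus address */
		/* ... decide whether dev_addr is reachable by the device ... */
	}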
@@ -163,7 +163,7 @@ swiotlb_init_with_default_size (size_t default_size)
 	 */
 	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
 	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
-	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
 }
 
 void
@@ -244,7 +244,7 @@ swiotlb_late_init_with_default_size (size_t default_size)
 
 	printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
 	       "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
-	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
 
 	return 0;
 
@@ -445,7 +445,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		flags |= GFP_DMA;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) {
+	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -465,11 +465,11 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		if (swiotlb_dma_mapping_error(handle))
 			return NULL;
 
-		ret = phys_to_virt(handle);
+		ret = bus_to_virt(handle);
 	}
 
 	memset(ret, 0, size);
-	dev_addr = virt_to_phys(ret);
+	dev_addr = virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
 	if (address_needs_mapping(hwdev, dev_addr)) {
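In this path the fallback map_single() returns a bus-address handle, so it must now be translated back with bus_to_virt() before the CPU-side memset(). A caller would use the coherent allocator roughly like this (illustrative sketch; dev, ring_size and the descriptor-ring purpose are assumptions):

	dma_addr_t ring_bus;
	void *ring;

	ring = swiotlb_alloc_coherent(dev, ring_size, &ring_bus, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;
	/* the CPU writes through 'ring'; the device reads at 'ring_bus' */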
@@ -525,7 +525,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 dma_addr_t
 swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 {
-	unsigned long dev_addr = virt_to_phys(ptr);
+	unsigned long dev_addr = virt_to_bus(ptr);
 	void *map;
 
 	BUG_ON(dir == DMA_NONE);
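Taken together with the mapping-error check further down, a driver would use these entry points roughly as follows (illustrative sketch; dev, buf and len are assumed to exist):

	dma_addr_t handle;

	handle = swiotlb_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (swiotlb_dma_mapping_error(handle))
		return -ENOMEM;	/* only the emergency overflow buffer was left */

	/* ... point the device at 'handle' and run the transfer ... */

	swiotlb_unmap_single(dev, handle, len, DMA_TO_DEVICE);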
@@ -546,7 +546,7 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 		map = io_tlb_overflow_buffer;
 	}
 
-	dev_addr = virt_to_phys(map);
+	dev_addr = virt_to_bus(map);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
@@ -569,7 +569,7 @@ void
 swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 		     int dir)
 {
-	char *dma_addr = phys_to_virt(dev_addr);
+	char *dma_addr = bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
@@ -592,7 +592,7 @@ static inline void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 		    size_t size, int dir, int target)
 {
-	char *dma_addr = phys_to_virt(dev_addr);
+	char *dma_addr = bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
@@ -623,7 +623,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 			  unsigned long offset, size_t size,
 			  int dir, int target)
 {
-	char *dma_addr = phys_to_virt(dev_addr) + offset;
+	char *dma_addr = bus_to_virt(dev_addr) + offset;
 
 	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
@@ -676,7 +676,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 
 	for (i = 0; i < nelems; i++, sg++) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
-		dev_addr = virt_to_phys(addr);
+		dev_addr = virt_to_bus(addr);
 		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
 			void *map = map_single(hwdev, addr, sg->length, dir);
 			if (!map) {
@@ -709,7 +709,8 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-			unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir);
+			unmap_single(hwdev, bus_to_virt(sg->dma_address),
+				     sg->dma_length, dir);
 		else if (dir == DMA_FROM_DEVICE)
 			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 }
@@ -731,7 +732,7 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
 
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-			sync_single(hwdev, (void *) sg->dma_address,
+			sync_single(hwdev, bus_to_virt(sg->dma_address),
 				    sg->dma_length, dir, target);
 		else if (dir == DMA_FROM_DEVICE)
 			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
@@ -754,7 +755,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 int
 swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 {
-	return (dma_addr == virt_to_phys(io_tlb_overflow_buffer));
+	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
 }
 
 /*
@@ -766,7 +767,7 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 int
 swiotlb_dma_supported (struct device *hwdev, u64 mask)
 {
-	return virt_to_phys(io_tlb_end - 1) <= mask;
+	return virt_to_bus(io_tlb_end - 1) <= mask;
 }
 
 EXPORT_SYMBOL(swiotlb_init);
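The last hunk keeps the same reachability test, only expressed in bus addresses: the aperture is usable if its final byte lies within the device's DMA mask. An illustrative caller (DMA_32BIT_MASK is the 2.6-era constant from linux/dma-mapping.h; hwdev is assumed):

	/* ask whether a 32-bit-only device can use the swiotlb aperture */
	if (!swiotlb_dma_supported(hwdev, DMA_32BIT_MASK))
		return -EIO;	/* io_tlb_end - 1 maps above 4GB on the bus */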