 lib/swiotlb.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index cae806d1ef12..c2fc470b6ed8 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -17,17 +17,17 @@
  */
 
 #include <linux/cache.h>
+#include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ctype.h>
 
 #include <asm/io.h>
-#include <asm/pci.h>
 #include <asm/dma.h>
+#include <asm/scatterlist.h>
 
 #include <linux/init.h>
 #include <linux/bootmem.h>
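Note on the include swap above: dropping <linux/pci.h> works because the file only needs the generic DMA definitions from <linux/dma-mapping.h>. For context (not part of this patch), in kernels of this era the PCI_DMA_* direction constants carry the same numeric values as enum dma_data_direction, which is why the purely textual substitutions in the hunks below do not change behaviour:

    /* enum dma_data_direction from <linux/dma-mapping.h>; the old
     * PCI_DMA_BIDIRECTIONAL/TODEVICE/FROMDEVICE/NONE constants in
     * <linux/pci.h> use the same values 0..3. */
    enum dma_data_direction {
            DMA_BIDIRECTIONAL = 0,
            DMA_TO_DEVICE = 1,
            DMA_FROM_DEVICE = 2,
            DMA_NONE = 3,
    };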
@@ -127,7 +127,7 @@ __setup("swiotlb=", setup_io_tlb_npages);
 
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the PCI DMA API.
+ * structures for the software IO TLB used to implement the DMA API.
  */
 void
 swiotlb_init_with_default_size (size_t default_size)
@@ -502,24 +502,24 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 	/*
 	 * Ran out of IOMMU space for this operation. This is very bad.
 	 * Unfortunately the drivers cannot handle this operation properly.
-	 * unless they check for pci_dma_mapping_error (most don't)
+	 * unless they check for dma_mapping_error (most don't)
 	 * When the mapping is small enough return a static buffer to limit
 	 * the damage, or panic when the transfer is too big.
 	 */
-	printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
+	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at "
 	       "device %s\n", size, dev ? dev->bus_id : "?");
 
 	if (size > io_tlb_overflow && do_panic) {
-		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-			panic("PCI-DMA: Memory would be corrupted\n");
-		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-			panic("PCI-DMA: Random memory would be DMAed\n");
+		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+			panic("DMA: Memory would be corrupted\n");
+		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+			panic("DMA: Random memory would be DMAed\n");
 	}
 }
 
 /*
  * Map a single buffer of the indicated size for DMA in streaming mode. The
- * PCI address to use is returned.
+ * physical address to use is returned.
  *
  * Once the device is given the dma address, the device owns this memory until
  * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
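For a driver, the calls that end up in this error path are the generic streaming-mapping ones. A minimal sketch of the pattern the comment above asks for, checking dma_mapping_error() after a map (placeholder names dev, buf and len; note that dma_mapping_error() takes only the dma_addr_t in kernels of this vintage and gained a struct device * argument later):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Hypothetical receive-path mapping; dev, buf and len are placeholders. */
    static int example_map_rx(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(handle))  /* old single-argument form */
                    return -ENOMEM;

            /* ... program the device with "handle"; on completion: */
            dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
            return 0;
    }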
@@ -606,8 +606,8 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
  * after a transfer.
  *
  * If you perform a swiotlb_map_single() but wish to interrogate the buffer
- * using the cpu, yet do not wish to teardown the PCI dma mapping, you must
- * call this function before doing so. At the next point you give the PCI dma
+ * using the cpu, yet do not wish to teardown the dma mapping, you must
+ * call this function before doing so. At the next point you give the dma
  * address back to the card, you must first perform a
  * swiotlb_dma_sync_for_device, and then the device again owns the buffer
  */
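The ownership hand-off described in this comment is the same one a driver sees through the generic dma_sync_single_for_cpu()/dma_sync_single_for_device() pair, which swiotlb backs on these platforms. A hedged sketch of that sequence (placeholder names; not code from this patch):

    /* CPU wants to peek at data while the streaming mapping stays alive. */
    dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
    /* ... read or copy the buffer contents on the CPU ... */

    /* Hand the buffer back before the device may DMA into it again. */
    dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);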
@@ -783,9 +783,9 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 }
 
 /*
- * Return whether the given PCI device DMA address mask can be supported
+ * Return whether the given device DMA address mask can be supported
  * properly. For example, if your device can only drive the low 24-bits
- * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
+ * during bus mastering, then you would pass 0x00ffffff as the mask to
  * this function.
  */
 int
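Driver code normally reaches this check through dma_supported()/dma_set_mask() rather than calling the swiotlb entry point directly. A small sketch for the 24-bit example given in the comment (hypothetical device; the mask is written as the literal the comment uses):

    /* A device that can only drive the low 24 address bits during bus
     * mastering: dma_set_mask() fails if the platform cannot honour it. */
    if (dma_set_mask(dev, 0x00ffffffULL))
            return -EIO;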