diff options
author | Muli Ben-Yehuda <mulix@mulix.org> | 2006-01-11 16:44:42 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-01-11 22:04:55 -0500 |
commit | 17a941d854a3f7b0bb916fdeee4c9ffdcc19d8d3 (patch) | |
tree | b6b3b55318336adf769bf57141a01a9defbbb202 /arch/x86_64/kernel/pci-swiotlb.c | |
parent | 8a6fdd3e912e0ce6f723431d66baf704bf8a1d26 (diff) |
[PATCH] x86_64: Use function pointers to call DMA mapping functions
AK: I hacked Muli's original patch a lot and there were a lot
of changes - all bugs are probably to blame on me now.
There were also some changes in the fall back behaviour
for swiotlb - in particular it doesn't try to use GFP_DMA
now anymore. Also all DMA mapping operations use the
same core dma_alloc_coherent code with proper fallbacks now.
And various other changes and cleanups.
Known problems: using iommu=force and swiotlb=force together breaks —
needs more testing.
This patch cleans up x86_64's DMA mapping dispatching code. Right now
we have three possible IOMMU types: AGP GART, swiotlb and nommu, and
in the future we will also have Xen's x86_64 swiotlb and other HW
IOMMUs for x86_64. In order to support all of them cleanly, this
patch:
- introduces a struct dma_mapping_ops with function pointers for each
of the DMA mapping operations of gart (AMD HW IOMMU), swiotlb
(software IOMMU) and nommu (no IOMMU).
- gets rid of:
if (swiotlb)
return swiotlb_xxx();
- PCI_DMA_BUS_IS_PHYS is now checked against the dma_ops being set
This makes swiotlb faster by avoiding double copying in some cases.
Signed-Off-By: Muli Ben-Yehuda <mulix@mulix.org>
Signed-Off-By: Jon D. Mason <jdmason@us.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/x86_64/kernel/pci-swiotlb.c')
-rw-r--r-- | arch/x86_64/kernel/pci-swiotlb.c | 42 |
1 files changed, 42 insertions, 0 deletions
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c new file mode 100644 index 000000000000..3569a25ad7fb --- /dev/null +++ b/arch/x86_64/kernel/pci-swiotlb.c | |||
@@ -0,0 +1,42 @@ | |||
1 | /* Glue code to lib/swiotlb.c */ | ||
2 | |||
3 | #include <linux/pci.h> | ||
4 | #include <linux/cache.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <asm/dma-mapping.h> | ||
7 | #include <asm/proto.h> | ||
8 | #include <asm/swiotlb.h> | ||
9 | #include <asm/dma.h> | ||
10 | |||
/*
 * Non-zero when the swiotlb (software bounce-buffer) implementation is
 * selected for DMA mapping; set by pci_swiotlb_init() below.  Exported
 * so modules can test which DMA path is active.
 */
int swiotlb __read_mostly;
EXPORT_SYMBOL(swiotlb);
13 | |||
/*
 * DMA mapping operations dispatch table for the swiotlb path: every
 * callback is forwarded to the corresponding helper in lib/swiotlb.c.
 * Installed into dma_ops by pci_swiotlb_init() when swiotlb is enabled.
 */
struct dma_mapping_ops swiotlb_dma_ops = {
	.mapping_error = swiotlb_dma_mapping_error,
	.alloc_coherent = swiotlb_alloc_coherent,
	.free_coherent = swiotlb_free_coherent,
	.map_single = swiotlb_map_single,
	.unmap_single = swiotlb_unmap_single,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.map_sg = swiotlb_map_sg,
	.unmap_sg = swiotlb_unmap_sg,
	/* NULL: fall back to the generic dma_supported() check. */
	.dma_supported = NULL,
};
30 | |||
31 | void pci_swiotlb_init(void) | ||
32 | { | ||
33 | /* don't initialize swiotlb if iommu=off (no_iommu=1) */ | ||
34 | if (!iommu_aperture && !no_iommu && | ||
35 | (end_pfn > MAX_DMA32_PFN || force_iommu)) | ||
36 | swiotlb = 1; | ||
37 | if (swiotlb) { | ||
38 | swiotlb_init(); | ||
39 | printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n"); | ||
40 | dma_ops = &swiotlb_dma_ops; | ||
41 | } | ||
42 | } | ||