author     Becky Bruce <becky.bruce@freescale.com>   2008-09-08 05:09:53 -0400
committer  Kumar Gala <galak@kernel.crashing.org>    2008-09-24 17:26:42 -0400
commit     8dd0e95206f7c33bed2aed33ac668335174684e8 (patch)
tree       fc338d97889c873bc6cf241da8469763f9d05629 /arch/powerpc/kernel/dma-iommu.c
parent     7c05d7e08d907d66b8e18515572f42c71fb709fe (diff)
powerpc: Move iommu dma ops from dma.c to dma-iommu.c
32-bit platforms are about to start using dma.c; move the iommu
dma ops into their own file to make this a bit cleaner.
Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/kernel/dma-iommu.c')
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c  103
1 files changed, 103 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
new file mode 100644
index 000000000000..01091f1d508c
--- /dev/null
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -0,0 +1,103 @@
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */

#include <asm/iommu.h>

/*
 * Generic iommu implementation
 */

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag)
{
	return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
				    dma_handle, device_to_mask(dev), flag,
				    dev->archdata.numa_node);
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle)
{
	iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address of the buffer
 * passed here is the kernel (virtual) address of the buffer.  The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
				       size_t size,
				       enum dma_data_direction direction,
				       struct dma_attrs *attrs)
{
	return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size,
				device_to_mask(dev), direction, attrs);
}


static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size,
				   enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction,
			   attrs);
}


static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	return iommu_map_sg(dev, dev->archdata.dma_data, sglist, nelems,
			    device_to_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
			       int nelems, enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction,
		       attrs);
}

/* We support DMA to/from any memory page via the iommu */
static int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl = dev->archdata.dma_data;

	if (!tbl || tbl->it_offset > mask) {
		printk(KERN_INFO
		       "Warning: IOMMU offset too big for device mask\n");
		if (tbl)
			printk(KERN_INFO
			       "mask: 0x%08lx, table offset: 0x%08lx\n",
			       mask, tbl->it_offset);
		else
			printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
			       mask);
		return 0;
	} else
		return 1;
}

struct dma_mapping_ops dma_iommu_ops = {
	.alloc_coherent	= dma_iommu_alloc_coherent,
	.free_coherent	= dma_iommu_free_coherent,
	.map_single	= dma_iommu_map_single,
	.unmap_single	= dma_iommu_unmap_single,
	.map_sg		= dma_iommu_map_sg,
	.unmap_sg	= dma_iommu_unmap_sg,
	.dma_supported	= dma_iommu_dma_supported,
};
EXPORT_SYMBOL(dma_iommu_ops);
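
For orientation, nothing in the file above runs until platform code hands a device an iommu_table (via dev->archdata.dma_data, which every callback dereferences) and installs dma_iommu_ops as that device's DMA ops. Below is a minimal sketch of that wiring, assuming the powerpc dev_archdata of this era (dma_ops and dma_data fields); the function name my_bus_setup_device_dma and the table pointer my_bus_iommu_table are hypothetical stand-ins for what real platform iommu setup code does once it has created a TCE table for the bus.

	/*
	 * Sketch only: hypothetical platform hook attaching a device to the
	 * generic iommu DMA ops added in the patch above.  Names are
	 * illustrative; real platforms do the equivalent from their bus/PCI
	 * device-setup paths.
	 */
	#include <linux/device.h>
	#include <asm/iommu.h>
	#include <asm/dma-mapping.h>

	/* Hypothetical per-bus TCE table, initialised elsewhere by platform code. */
	extern struct iommu_table *my_bus_iommu_table;

	/* Exported by dma-iommu.c above; redeclared here so the sketch is self-contained. */
	extern struct dma_mapping_ops dma_iommu_ops;

	static void my_bus_setup_device_dma(struct device *dev)
	{
		/* Every dma_iommu_* callback fetches its iommu_table from here. */
		dev->archdata.dma_data = my_bus_iommu_table;

		/*
		 * Route the device's dma_map_single()/dma_map_sg()/alloc_coherent()
		 * calls through the iommu-backed ops.
		 */
		dev->archdata.dma_ops = &dma_iommu_ops;
	}

With the table attached this way, dma_iommu_dma_supported() only has to check that the device's DMA mask reaches past tbl->it_offset, i.e. that the window the table maps is actually addressable by the device.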