author		Becky Bruce <becky.bruce@freescale.com>	2008-09-08 05:09:52 -0400
committer	Kumar Gala <galak@kernel.crashing.org>	2008-09-24 17:26:41 -0400
commit		7c05d7e08d907d66b8e18515572f42c71fb709fe (patch)
tree		6f5c6549f6d515c38a0ece662da4ae1f1ec43579 /arch/powerpc/kernel/dma.c
parent		1afb7f809bfb8fad9eec9419f3dfd75cee746ebd (diff)
powerpc: Rename dma_64.c to dma.c
This is in preparation for the merge of the 32 and 64-bit
dma code in arch/powerpc.
Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/kernel/dma.c')
-rw-r--r--	arch/powerpc/kernel/dma.c | 200
1 file changed, 200 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
new file mode 100644
index 000000000000..ae5708e3a312
--- /dev/null
+++ b/arch/powerpc/kernel/dma.c
@@ -0,0 +1,200 @@
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses and busses using the iommu infrastructure
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/bug.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>

/*
 * Generic iommu implementation
 */

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag)
{
	return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
				    dma_handle, device_to_mask(dev), flag,
				    dev->archdata.numa_node);
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle)
{
	iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address of the buffer
 * passed here is the kernel (virtual) address of the buffer. The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
				       size_t size,
				       enum dma_data_direction direction,
				       struct dma_attrs *attrs)
{
	return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size,
				device_to_mask(dev), direction, attrs);
}


static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size,
				   enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction,
			   attrs);
}


static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	return iommu_map_sg(dev, dev->archdata.dma_data, sglist, nelems,
			    device_to_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
			       int nelems, enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction,
		       attrs);
}

/* We support DMA to/from any memory page via the iommu */
static int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl = dev->archdata.dma_data;

	if (!tbl || tbl->it_offset > mask) {
		printk(KERN_INFO
		       "Warning: IOMMU offset too big for device mask\n");
		if (tbl)
			printk(KERN_INFO
			       "mask: 0x%08llx, table offset: 0x%08lx\n",
			       (unsigned long long)mask, tbl->it_offset);
		else
			printk(KERN_INFO "mask: 0x%08llx, table unavailable\n",
			       (unsigned long long)mask);
		return 0;
	} else
		return 1;
}

struct dma_mapping_ops dma_iommu_ops = {
	.alloc_coherent	= dma_iommu_alloc_coherent,
	.free_coherent	= dma_iommu_free_coherent,
	.map_single	= dma_iommu_map_single,
	.unmap_single	= dma_iommu_unmap_single,
	.map_sg		= dma_iommu_map_sg,
	.unmap_sg	= dma_iommu_unmap_sg,
	.dma_supported	= dma_iommu_dma_supported,
};
EXPORT_SYMBOL(dma_iommu_ops);
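
/*
 * Illustrative sketch, not part of the original patch: platform code
 * selects these ops per device by attaching an iommu table to archdata,
 * along the lines of
 *
 *	dev->archdata.dma_data = tbl;            tbl: this bus's iommu_table
 *	dev->archdata.dma_ops  = &dma_iommu_ops;
 *
 * where the dma_ops field name is an assumption based on the
 * contemporary powerpc struct dev_archdata.
 */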

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is zero.
 */

static unsigned long get_dma_direct_offset(struct device *dev)
{
	return (unsigned long)dev->archdata.dma_data;
}
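
/*
 * Illustrative sketch, not part of the original patch: a platform whose
 * RAM is seen by devices at a non-zero bus address would stash that
 * offset in dma_data and install the direct ops, e.g.
 *
 *	dev->archdata.dma_data = (void *)PCI_DRAM_OFFSET;
 *	dev->archdata.dma_ops  = &dma_direct_ops;
 *
 * after which the allocator below returns bus addresses of
 * phys + PCI_DRAM_OFFSET.  PCI_DRAM_OFFSET stands in for whatever
 * constant the platform defines; dma_ops is again an assumed field name.
 */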

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct page *page;
	void *ret;
	int node = dev->archdata.numa_node;

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);

	return ret;
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	return virt_to_abs(ptr) + get_dma_direct_offset(dev);
}

static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction,
				    struct dma_attrs *attrs)
{
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		sg->dma_length = sg->length;
	}

	return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	/* Could be improved to check for memory though it better be
	 * done via some global so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= DMA_32BIT_MASK;
}

struct dma_mapping_ops dma_direct_ops = {
	.alloc_coherent	= dma_direct_alloc_coherent,
	.free_coherent	= dma_direct_free_coherent,
	.map_single	= dma_direct_map_single,
	.unmap_single	= dma_direct_unmap_single,
	.map_sg		= dma_direct_map_sg,
	.unmap_sg	= dma_direct_unmap_sg,
	.dma_supported	= dma_direct_dma_supported,
};
EXPORT_SYMBOL(dma_direct_ops);
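
For context, a driver never calls these hooks directly: it goes through the generic DMA API, which dispatches to whichever dma_mapping_ops structure the platform installed for the device. Below is a minimal consumer-side sketch, assuming the contemporary dma-mapping interface; the function name and the one-page buffer size are placeholders.

#include <linux/dma-mapping.h>

/* Allocate and release one page of DMA-coherent memory.  The call lands
 * in dma_iommu_alloc_coherent() or dma_direct_alloc_coherent() depending
 * on which ops the platform installed for this device. */
static int example_dma_setup(struct device *dev)
{
	dma_addr_t bus_addr;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &bus_addr, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program the device with bus_addr, use cpu_addr from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, bus_addr);
	return 0;
}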