-rw-r--r--	include/asm-powerpc/dma-mapping.h (renamed from include/asm-ppc/dma-mapping.h)	128
-rw-r--r--	include/asm-ppc64/dma-mapping.h	136
2 files changed, 93 insertions(+), 171 deletions(-)
diff --git a/include/asm-ppc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 798602620e87..59a80163f75f 100644
--- a/include/asm-ppc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -1,15 +1,22 @@
 /*
- * This is based on both include/asm-sh/dma-mapping.h and
- * include/asm-ppc/pci.h
+ * Copyright (C) 2004 IBM
+ *
+ * Implements the generic device dma API for powerpc.
+ * the pci and vio busses
  */
-#ifndef __ASM_PPC_DMA_MAPPING_H
-#define __ASM_PPC_DMA_MAPPING_H
+#ifndef _ASM_DMA_MAPPING_H
+#define _ASM_DMA_MAPPING_H
 
 #include <linux/config.h>
+#include <linux/types.h>
+#include <linux/cache.h>
 /* need struct page definitions */
 #include <linux/mm.h>
 #include <asm/scatterlist.h>
 #include <asm/io.h>
+#include <asm/bug.h>
+
+#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
 /*
@@ -37,6 +44,30 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
 
 #endif /* ! CONFIG_NOT_COHERENT_CACHE */
 
+#ifdef CONFIG_PPC64
+
+extern int dma_supported(struct device *dev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
+extern void *dma_alloc_coherent(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flag);
+extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t dma_handle);
+extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+		size_t size, enum dma_data_direction direction);
+extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+		size_t size, enum dma_data_direction direction);
+extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size,
+		enum dma_data_direction direction);
+extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+		size_t size, enum dma_data_direction direction);
+extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction direction);
+extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+		int nhwentries, enum dma_data_direction direction);
+
+#else /* CONFIG_PPC64 */
+
 #define dma_supported(dev, mask)	(1)
 
 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
@@ -134,29 +165,27 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	/* We don't do anything here. */
 #define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)
 
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			size_t size,
-			enum dma_data_direction direction)
+#endif /* CONFIG_PPC64 */
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
-
 	__dma_sync(bus_to_virt(dma_handle), size, direction);
 }
 
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-			   size_t size,
-			   enum dma_data_direction direction)
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
-
 	__dma_sync(bus_to_virt(dma_handle), size, direction);
 }
 
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
-		    enum dma_data_direction direction)
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sg, int nents,
+		enum dma_data_direction direction)
 {
 	int i;
 
@@ -166,9 +195,9 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 }
 
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
-		       enum dma_data_direction direction)
+static inline void dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sg, int nents,
+		enum dma_data_direction direction)
 {
 	int i;
 
@@ -178,6 +207,15 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 }
 
+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+#ifdef CONFIG_PPC64
+	return (dma_addr == DMA_ERROR_CODE);
+#else
+	return 0;
+#endif
+}
+
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #ifdef CONFIG_NOT_COHERENT_CACHE
@@ -188,40 +226,60 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 
 static inline int dma_get_cache_alignment(void)
 {
+#ifdef CONFIG_PPC64
+	/* no easy way to get cache size on all processors, so return
+	 * the maximum possible, to be safe */
+	return (1 << L1_CACHE_SHIFT_MAX);
+#else
 	/*
 	 * Each processor family will define its own L1_CACHE_SHIFT,
 	 * L1_CACHE_BYTES wraps to this, so this is always safe.
 	 */
 	return L1_CACHE_BYTES;
+#endif
 }
 
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction direction)
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
 {
 	/* just sync everything for now */
 	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
 }
 
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction direction)
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
 {
 	/* just sync everything for now */
 	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
 }
 
 static inline void dma_cache_sync(void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
+	BUG_ON(direction == DMA_NONE);
 	__dma_sync(vaddr, size, (int)direction);
 }
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-#endif /* __ASM_PPC_DMA_MAPPING_H */
+/*
+ * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
+ */
+struct dma_mapping_ops {
+	void *		(*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag);
+	void		(*free_coherent)(struct device *dev, size_t size,
+				void *vaddr, dma_addr_t dma_handle);
+	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
+				size_t size, enum dma_data_direction direction);
+	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
+				size_t size, enum dma_data_direction direction);
+	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction);
+	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction);
+	int		(*dma_supported)(struct device *dev, u64 mask);
+	int		(*dac_dma_supported)(struct device *dev, u64 mask);
+};
+
+#endif	/* _ASM_DMA_MAPPING_H */
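
The merged header above keeps the ppc32 inline streaming implementations and the ppc64 extern (iommu-backed) ones behind CONFIG_PPC64. For reference, a minimal sketch of how a driver consumes this API; the device, buffer, and function names are illustrative and not part of the patch:

#include <linux/dma-mapping.h>

/* Map a buffer for a device-to-memory transfer, then tear it down.
 * On CONFIG_PPC64 dma_map_single() can fail with DMA_ERROR_CODE, which
 * is what the new dma_mapping_error() checks; on non-coherent ppc32
 * parts the same calls resolve to __dma_sync()/__dma_sync_page().
 */
static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(handle))
		return -EIO;

	/* ... hand 'handle' to the device and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}
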
diff --git a/include/asm-ppc64/dma-mapping.h b/include/asm-ppc64/dma-mapping.h
deleted file mode 100644
index fb68fa23bea8..000000000000
--- a/include/asm-ppc64/dma-mapping.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/* Copyright (C) 2004 IBM
- *
- * Implements the generic device dma API for ppc64. Handles
- * the pci and vio busses
- */
-
-#ifndef _ASM_DMA_MAPPING_H
-#define _ASM_DMA_MAPPING_H
-
-#include <linux/types.h>
-#include <linux/cache.h>
-/* need struct page definitions */
-#include <linux/mm.h>
-#include <asm/scatterlist.h>
-#include <asm/bug.h>
-
-#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
-
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag);
-extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t dma_handle);
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-		size_t size, enum dma_data_direction direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-		size_t size, enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-		size_t size, enum dma_data_direction direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nhwentries, enum dma_data_direction direction);
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
-	return (dma_addr == DMA_ERROR_CODE);
-}
-
-/* Now for the API extensions over the pci_ one */
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d)	(1)
-
-static inline int
-dma_get_cache_alignment(void)
-{
-	/* no easy way to get cache size on all processors, so return
-	 * the maximum possible, to be safe */
-	return (1 << L1_CACHE_SHIFT_MAX);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-static inline void
-dma_cache_sync(void *vaddr, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-/*
- * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
- */
-struct dma_mapping_ops {
-	void *		(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag);
-	void		(*free_coherent)(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
-				size_t size, enum dma_data_direction direction);
-	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
-				size_t size, enum dma_data_direction direction);
-	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction);
-	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction);
-	int		(*dma_supported)(struct device *dev, u64 mask);
-	int		(*dac_dma_supported)(struct device *dev, u64 mask);
-};
-
-#endif	/* _ASM_DMA_MAPPING_H */
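
The struct dma_mapping_ops table carried over into the merged header is the indirection point that lets G5, iSeries/pSeries, PCI, and VIO each supply their own implementations. A hypothetical backend might fill it in along these lines; every "example_" identifier is invented for illustration, the patch itself only defines the struct:

/* Sketch of a bus backend populating dma_mapping_ops. A real backend
 * (e.g. an iommu-based PCI path) implements each hook and platform
 * code installs the table it wants for the bus.
 */
static void *example_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	/* ... allocate memory, set *dma_handle; NULL on failure ... */
	return NULL;
}

static dma_addr_t example_map_single(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction direction)
{
	/* ... create a bus mapping; DMA_ERROR_CODE on failure ... */
	return DMA_ERROR_CODE;
}

static struct dma_mapping_ops example_dma_ops = {
	.alloc_coherent	= example_alloc_coherent,
	.map_single	= example_map_single,
	/* .free_coherent, .unmap_single, .map_sg, .unmap_sg,
	 * .dma_supported and .dac_dma_supported filled in likewise */
};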