author     Becky Bruce <becky.bruce@freescale.com>   2008-09-12 06:34:46 -0400
committer  Kumar Gala <galak@kernel.crashing.org>    2008-09-24 17:26:45 -0400
commit     4fc665b88a79a45bae8bbf3a05563c27c7337c3d (patch)
tree       ca668c2fab7c3a4d62b92174f4a5fcae2625cdd1 /arch/powerpc/include
parent     8fae0353247530d2124b2419052fa6120462fa99 (diff)
powerpc: Merge 32 and 64-bit dma code
We essentially adopt the 64-bit dma code, with some changes to support
32-bit systems, including HIGHMEM. DMA functions on 32-bit are now
invoked via accessor functions which call the correct op for a device
based on its archdata dma_ops; if no archdata dma_ops is set, this
defaults to dma_direct_ops.
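
To illustrate, a minimal sketch of the 32-bit dispatch this describes,
condensed from the get_dma_ops() hunk in the patch below (on 64-bit the
same condition returns NULL instead, and callers BUG() on that):

    /* Per-device ops if set up; otherwise fall back to the generic
     * direct ops.  This is the 32-bit shape of get_dma_ops() below. */
    static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
    {
    	if (unlikely(dev == NULL) || dev->archdata.dma_ops == NULL)
    		return &dma_direct_ops;
    	return dev->archdata.dma_ops;
    }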
In addition, the dma_map/unmap_page functions are added to dma_ops
because we can't just fall back on map/unmap_single when HIGHMEM is
enabled. In the case of dma_direct_*, we stop using map/unmap_single
and just use the page version - this saves a lot of ugly
ifdeffing. We leave map/unmap_single in the dma_ops definition,
though, because they are needed by the iommu code, which does not
implement map/unmap_page. Ideally, going forward, we will completely
eliminate map/unmap_single and just have map/unmap_page, if it's
workable for 64-bit.
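
For reference, the fallback described above, condensed from
dma_map_single_attrs() in the patch below:

    /* Prefer map_single when the ops provide it; otherwise split the
     * virtual address into page + offset and go through map_page. */
    if (dma_ops->map_single)
    	return dma_ops->map_single(dev, cpu_addr, size, direction,
    				   attrs);
    return dma_ops->map_page(dev, virt_to_page(cpu_addr),
    			 (unsigned long)cpu_addr % PAGE_SIZE, size,
    			 direction, attrs);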
Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/include')
 -rw-r--r--  arch/powerpc/include/asm/dma-mapping.h | 187
 -rw-r--r--  arch/powerpc/include/asm/machdep.h     |   5
 -rw-r--r--  arch/powerpc/include/asm/pci.h         |  14
 3 files changed, 72 insertions, 134 deletions
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index c7ca45f97dd2..fddb229bd74f 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -44,8 +44,6 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
 
 #endif /* ! CONFIG_NOT_COHERENT_CACHE */
 
-#ifdef CONFIG_PPC64
-
 static inline unsigned long device_to_mask(struct device *dev)
 {
 	if (dev->dma_mask && *dev->dma_mask)
@@ -76,8 +74,24 @@ struct dma_mapping_ops {
 			struct dma_attrs *attrs);
 	int		(*dma_supported)(struct device *dev, u64 mask);
 	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
+	dma_addr_t	(*map_page)(struct device *dev, struct page *page,
+			unsigned long offset, size_t size,
+			enum dma_data_direction direction,
+			struct dma_attrs *attrs);
+	void		(*unmap_page)(struct device *dev,
+			dma_addr_t dma_address, size_t size,
+			enum dma_data_direction direction,
+			struct dma_attrs *attrs);
 };
 
+/*
+ * Available generic sets of operations
+ */
+#ifdef CONFIG_PPC64
+extern struct dma_mapping_ops dma_iommu_ops;
+#endif
+extern struct dma_mapping_ops dma_direct_ops;
+
 static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 {
 	/* We don't handle the NULL dev case for ISA for now. We could
@@ -85,8 +99,19 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 	 * only ISA DMA device we support is the floppy and we have a hack
 	 * in the floppy driver directly to get a device for us.
 	 */
-	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
+
+	if (unlikely(dev == NULL) || dev->archdata.dma_ops == NULL) {
+#ifdef CONFIG_PPC64
 		return NULL;
+#else
+		/* Use default on 32-bit if dma_ops is not set up */
+		/* TODO: Long term, we should fix drivers so that dev and
+		 * archdata dma_ops are set up for all buses.
+		 */
+		return &dma_direct_ops;
+#endif
+	}
+
 	return dev->archdata.dma_ops;
 }
 
@@ -123,6 +148,12 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 	return 0;
 }
 
+/*
+ * TODO: map_/unmap_single will ideally go away, to be completely
+ * replaced by map/unmap_page.  Until then, we allow dma_ops to have
+ * one or the other, or both by checking to see if the specific
+ * function requested exists; and if not, falling back on the other set.
+ */
 static inline dma_addr_t dma_map_single_attrs(struct device *dev,
 					      void *cpu_addr,
 					      size_t size,
@@ -132,7 +163,14 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	return dma_ops->map_single(dev, cpu_addr, size, direction, attrs);
+
+	if (dma_ops->map_single)
+		return dma_ops->map_single(dev, cpu_addr, size, direction,
+					   attrs);
+
+	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
+				 (unsigned long)cpu_addr % PAGE_SIZE, size,
+				 direction, attrs);
 }
 
 static inline void dma_unmap_single_attrs(struct device *dev,
@@ -144,7 +182,13 @@ static inline void dma_unmap_single_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
+
+	if (dma_ops->unmap_single) {
+		dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
+		return;
+	}
+
+	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
 }
 
 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
@@ -156,8 +200,13 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
+
+	if (dma_ops->map_page)
+		return dma_ops->map_page(dev, page, offset, size, direction,
+					 attrs);
+
 	return dma_ops->map_single(dev, page_address(page) + offset, size,
 				   direction, attrs);
 }
 
 static inline void dma_unmap_page_attrs(struct device *dev,
@@ -169,6 +218,12 @@ static inline void dma_unmap_page_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
+
+	if (dma_ops->unmap_page) {
+		dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
+		return;
+	}
+
 	dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
 }
 
@@ -253,126 +308,6 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
 }
 
-/*
- * Available generic sets of operations
- */
-extern struct dma_mapping_ops dma_iommu_ops;
-extern struct dma_mapping_ops dma_direct_ops;
-
-#else /* CONFIG_PPC64 */
-
-#define dma_supported(dev, mask)	(1)
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t * dma_handle,
-				       gfp_t gfp)
-{
-#ifdef CONFIG_NOT_COHERENT_CACHE
-	return __dma_alloc_coherent(size, dma_handle, gfp);
-#else
-	void *ret;
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
-		gfp |= GFP_DMA;
-
-	ret = (void *)__get_free_pages(gfp, get_order(size));
-
-	if (ret != NULL) {
-		memset(ret, 0, size);
-		*dma_handle = virt_to_bus(ret);
-	}
-
-	return ret;
-#endif
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-		  dma_addr_t dma_handle)
-{
-#ifdef CONFIG_NOT_COHERENT_CACHE
-	__dma_free_coherent(size, vaddr);
-#else
-	free_pages((unsigned long)vaddr, get_order(size));
-#endif
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	__dma_sync(ptr, size, direction);
-
-	return virt_to_bus(ptr);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-				    size_t size,
-				    enum dma_data_direction direction)
-{
-	/* We do nothing. */
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	__dma_sync_page(page, offset, size, direction);
-
-	return page_to_bus(page) + offset;
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-				  size_t size,
-				  enum dma_data_direction direction)
-{
-	/* We do nothing. */
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
-	   enum dma_data_direction direction)
-{
-	struct scatterlist *sg;
-	int i;
-
-	BUG_ON(direction == DMA_NONE);
-
-	for_each_sg(sgl, sg, nents, i) {
-		BUG_ON(!sg_page(sg));
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
-		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
-	}
-
-	return nents;
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-				int nhwentries,
-				enum dma_data_direction direction)
-{
-	/* We don't do anything here. */
-}
-
-#endif /* CONFIG_PPC64 */
-
 static inline void dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 893aafd87fde..2740c44ff717 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -88,8 +88,6 @@ struct machdep_calls {
 	unsigned long	(*tce_get)(struct iommu_table *tbl,
 			    long index);
 	void		(*tce_flush)(struct iommu_table *tbl);
-	void		(*pci_dma_dev_setup)(struct pci_dev *dev);
-	void		(*pci_dma_bus_setup)(struct pci_bus *bus);
 
 	void __iomem *	(*ioremap)(phys_addr_t addr, unsigned long size,
 				   unsigned long flags);
@@ -101,6 +99,9 @@
 #endif
 #endif /* CONFIG_PPC64 */
 
+	void		(*pci_dma_dev_setup)(struct pci_dev *dev);
+	void		(*pci_dma_bus_setup)(struct pci_bus *bus);
+
 	int		(*probe)(void);
 	void		(*setup_arch)(void); /* Optional, may be NULL */
 	void		(*init_early)(void);
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index a05a942b1c25..0e52c7828ea4 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -60,6 +60,14 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 	return channel ? 15 : 14;
 }
 
+#ifdef CONFIG_PCI
+extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops);
+extern struct dma_mapping_ops *get_pci_dma_ops(void);
+#else /* CONFIG_PCI */
+#define set_pci_dma_ops(d)
+#define get_pci_dma_ops()	NULL
+#endif
+
 #ifdef CONFIG_PPC64
 
 /*
@@ -70,9 +78,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 #define PCI_DISABLE_MWI
 
 #ifdef CONFIG_PCI
-extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops);
-extern struct dma_mapping_ops *get_pci_dma_ops(void);
-
 static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 					enum pci_dma_burst_strategy *strat,
 					unsigned long *strategy_parameter)
@@ -89,9 +94,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 	*strat = PCI_DMA_BURST_MULTIPLE;
 	*strategy_parameter = cacheline_size;
 }
-#else /* CONFIG_PCI */
-#define set_pci_dma_ops(d)
-#define get_pci_dma_ops()	NULL
 #endif
 
 #else /* 32-bit */