author		David S. Miller <davem@sunset.davemloft.net>	2007-07-28 01:39:14 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-07-30 03:27:34 -0400
commit		ad7ad57c6127042c411353dddb723765964815db (patch)
tree		600484291d9cfa68d54dc9b230f5bd115f495213 /include
parent		c7f439b99efbea74c70a5531f92566db5a6731f2 (diff)
[SPARC64]: Fix conflicts in SBUS/PCI/EBUS/ISA DMA handling.
Fully unify all of the DMA ops so that subordinate bus types to the DMA
operation providers (such as ebus, isa, of_device) can work transparently.

Basically, we just make sure that for every system device we create, the
dev->archdata 'iommu' and 'stc' fields are filled in.

Then we have two platform variants of the DMA ops, one for SUN4U which
actually programs the real hardware, and one for SUN4V which makes
hypervisor calls.

This also fixes the crashes in parport_pc on sparc64, reported by
Meelis Roos.

Signed-off-by: David S. Miller <davem@davemloft.net>
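The shape of the change, for readers skimming the diffs below: every DMA call
now bounces through one boot-time ops pointer instead of compile-time
PCI-or-nothing branches. The sketch below is illustrative only; the platform
side lives under arch/sparc64 and is not part of this 'include' diffstat, so
the names dma_4u_map_single and sun4u_dma_ops here are assumptions, not
quotes from the patch:

/* Illustrative sketch of a platform provider (assumed names): */
static dma_addr_t dma_4u_map_single(struct device *dev, void *cpu_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	struct iommu *iommu = dev->archdata.iommu; /* filled in at device creation */

	if (!iommu)
		return DMA_ERROR_CODE;
	/* ... allocate an IOPTE range and program the SUN4U hardware ... */
	return DMA_ERROR_CODE;	/* real code returns the mapped bus address */
}

static const struct dma_ops sun4u_dma_ops = {
	.map_single	= dma_4u_map_single,
	/* ... the remaining hooks ... */
};

/* SUN4V would install a hypervisor-call variant instead. */
const struct dma_ops *dma_ops = &sun4u_dma_ops;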
Diffstat (limited to 'include')
-rw-r--r--	include/asm-sparc/device.h		4
-rw-r--r--	include/asm-sparc64/dma-mapping.h	337
-rw-r--r--	include/asm-sparc64/iommu.h		11
-rw-r--r--	include/asm-sparc64/parport.h		2
-rw-r--r--	include/asm-sparc64/pci.h		152
-rw-r--r--	include/asm-sparc64/sbus.h		86
6 files changed, 207 insertions(+), 385 deletions(-)
diff --git a/include/asm-sparc/device.h b/include/asm-sparc/device.h
index 4a56d84d69c4..c0a7786d65f7 100644
--- a/include/asm-sparc/device.h
+++ b/include/asm-sparc/device.h
@@ -10,6 +10,10 @@ struct device_node;
 struct of_device;
 
 struct dev_archdata {
+	void *iommu;
+	void *stc;
+	void *host_controller;
+
 	struct device_node *prom_node;
 	struct of_device *op;
 };
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index c58ec1661df8..0a1006692bb2 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -1,307 +1,134 @@
 #ifndef _ASM_SPARC64_DMA_MAPPING_H
 #define _ASM_SPARC64_DMA_MAPPING_H
 
-
-#ifdef CONFIG_PCI
-
-/* we implement the API below in terms of the existing PCI one,
- * so include it */
-#include <linux/pci.h>
-/* need struct page definitions */
+#include <linux/scatterlist.h>
 #include <linux/mm.h>
 
-#include <asm/of_device.h>
+#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
 
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_dma_supported(to_pci_dev(dev), mask);
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
-}
-
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t flag)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_iommu_ops->alloc_consistent(to_pci_dev(dev), size, dma_handle, flag);
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		  dma_addr_t dma_handle)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
-}
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
-				    size, (int)direction);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-			   enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
-				       size, (int)direction);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		    enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		       enum dma_data_direction direction)
-{
-	BUG_ON(dev->bus != &pci_bus_type);
-
-	pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
-}
-
-static inline int
-dma_mapping_error(dma_addr_t dma_addr)
-{
-	return pci_dma_mapping_error(dma_addr);
-}
-
-#else
-
-struct device;
-struct page;
-struct scatterlist;
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	BUG();
-	return 0;
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	BUG();
-	return 0;
-}
+struct dma_ops {
+	void *(*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag);
+	void (*free_coherent)(struct device *dev, size_t size,
+			      void *cpu_addr, dma_addr_t dma_handle);
+	dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
+				 size_t size,
+				 enum dma_data_direction direction);
+	void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
+			     size_t size,
+			     enum dma_data_direction direction);
+	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
+		      enum dma_data_direction direction);
+	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
+			 int nhwentries,
+			 enum dma_data_direction direction);
+	void (*sync_single_for_cpu)(struct device *dev,
+				    dma_addr_t dma_handle, size_t size,
+				    enum dma_data_direction direction);
+	void (*sync_single_for_device)(struct device *dev,
+				       dma_addr_t dma_handle, size_t size,
+				       enum dma_data_direction direction);
+	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
+				int nelems,
+				enum dma_data_direction direction);
+	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
+				   int nelems,
+				   enum dma_data_direction direction);
+};
+extern const struct dma_ops *dma_ops;
+
+extern int dma_supported(struct device *dev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
 				       dma_addr_t *dma_handle, gfp_t flag)
 {
-	BUG();
-	return NULL;
+	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
 }
 
 static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle)
+				     void *cpu_addr, dma_addr_t dma_handle)
 {
-	BUG();
+	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
 }
 
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-	       enum dma_data_direction direction)
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+					size_t size,
+					enum dma_data_direction direction)
 {
-	BUG();
-	return 0;
+	return dma_ops->map_single(dev, cpu_addr, size, direction);
 }
 
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
+static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+				    size_t size,
+				    enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->unmap_single(dev, dma_addr, size, direction);
 }
 
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction direction)
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      unsigned long offset, size_t size,
+				      enum dma_data_direction direction)
 {
-	BUG();
-	return 0;
+	return dma_ops->map_single(dev, page_address(page) + offset,
+				   size, direction);
 }
 
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
+static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+				  size_t size,
+				  enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->unmap_single(dev, dma_address, size, direction);
 }
 
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction)
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
+			     int nents, enum dma_data_direction direction)
 {
-	BUG();
-	return 0;
+	return dma_ops->map_sg(dev, sg, nents, direction);
 }
 
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->unmap_sg(dev, sg, nents, direction);
 }
 
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
+static inline void dma_sync_single_for_cpu(struct device *dev,
+					   dma_addr_t dma_handle, size_t size,
+					   enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
 }
 
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-			   enum dma_data_direction direction)
+static inline void dma_sync_single_for_device(struct device *dev,
+					      dma_addr_t dma_handle,
+					      size_t size,
+					      enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->sync_single_for_device(dev, dma_handle, size, direction);
 }
 
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		    enum dma_data_direction direction)
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+				       struct scatterlist *sg, int nelems,
+				       enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
 }
 
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		       enum dma_data_direction direction)
+static inline void dma_sync_sg_for_device(struct device *dev,
+					  struct scatterlist *sg, int nelems,
+					  enum dma_data_direction direction)
 {
-	BUG();
+	dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
 }
 
-static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(dma_addr_t dma_addr)
 {
-	BUG();
-	return 0;
+	return (dma_addr == DMA_ERROR_CODE);
 }
 
-#endif /* PCI */
-
-
-/* Now for the API extensions over the pci_ one */
-
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #define dma_is_consistent(d, h)	(1)
 
-static inline int
-dma_get_cache_alignment(void)
-{
-	/* no easy way to get cache size on all processors, so return
-	 * the maximum possible, to be safe */
-	return (1 << INTERNODE_CACHE_SHIFT);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction direction)
-{
-	/* just sync everything, that's all the pci API can do */
-	dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction direction)
-{
-	/* just sync everything, that's all the pci API can do */
-	dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
-}
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-	/* could define this in terms of the dma_cache ... operations,
-	 * but if you get this on a platform, you should convert the platform
-	 * to using the generic device DMA API */
-	BUG();
-}
-
 #endif /* _ASM_SPARC64_DMA_MAPPING_H */
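With dma-mapping.h reduced to the thin wrappers above, bus-specific driver
code is no longer needed. The following fragment is a hypothetical usage
example (example_start_io is not from the patch); it behaves identically
whether dev sits behind PCI, SBUS, EBUS or ISA, because every device now
carries its IOMMU cookies in dev->archdata:

#include <linux/dma-mapping.h>

static int example_start_io(struct device *dev, void *buf, size_t len)
{
	dma_addr_t busaddr;

	busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(busaddr))	/* i.e. busaddr == DMA_ERROR_CODE */
		return -ENOMEM;

	/* ... point the device at busaddr and wait for completion ... */

	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
	return 0;
}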
diff --git a/include/asm-sparc64/iommu.h b/include/asm-sparc64/iommu.h
index 0b1813f41045..9eac6676caf1 100644
--- a/include/asm-sparc64/iommu.h
+++ b/include/asm-sparc64/iommu.h
@@ -1,7 +1,6 @@
-/* $Id: iommu.h,v 1.10 2001/03/08 09:55:56 davem Exp $
- * iommu.h: Definitions for the sun5 IOMMU.
+/* iommu.h: Definitions for the sun5 IOMMU.
  *
- * Copyright (C) 1996, 1999 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996, 1999, 2007 David S. Miller (davem@davemloft.net)
  */
 #ifndef _SPARC64_IOMMU_H
 #define _SPARC64_IOMMU_H
@@ -33,6 +32,7 @@ struct iommu {
 	unsigned long iommu_tsbbase;
 	unsigned long iommu_flush;
 	unsigned long iommu_flushinv;
+	unsigned long iommu_tags;
 	unsigned long iommu_ctxflush;
 	unsigned long write_complete_reg;
 	unsigned long dummy_page;
@@ -54,4 +54,7 @@ struct strbuf {
 	volatile unsigned long __flushflag_buf[(64+(64-1)) / sizeof(long)];
 };
 
-#endif /* !(_SPARC_IOMMU_H) */
+extern int iommu_table_init(struct iommu *iommu, int tsbsize,
+			    u32 dma_offset, u32 dma_addr_mask);
+
+#endif /* !(_SPARC64_IOMMU_H) */
diff --git a/include/asm-sparc64/parport.h b/include/asm-sparc64/parport.h
index 600afe5ae2e3..8116e8f6062c 100644
--- a/include/asm-sparc64/parport.h
+++ b/include/asm-sparc64/parport.h
@@ -117,7 +117,7 @@ static int __devinit ecpp_probe(struct of_device *op, const struct of_device_id
 	if (!strcmp(parent->name, "dma")) {
 		p = parport_pc_probe_port(base, base + 0x400,
 					  op->irqs[0], PARPORT_DMA_NOFIFO,
-					  op->dev.parent);
+					  op->dev.parent->parent);
 		if (!p)
 			return -ENOMEM;
 		dev_set_drvdata(&op->dev, p);
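The one-liner above is the parport_pc fix mentioned in the commit message.
parport_pc performs DMA against whatever struct device it is handed; an
assumed sketch of the OF device hierarchy (illustrative, not taken from the
patch) shows why the extra ->parent hop is needed:

/* Assumed hierarchy for an EBUS parallel port:
 *
 *	ebus device		<- archdata.iommu/.stc populated here
 *	  "dma" node		<- op->dev.parent (no DMA cookies of its own)
 *	    "ecpp" port		<- op->dev
 *
 * so the port must be registered against op->dev.parent->parent.
 */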
diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h
index e11ac100f043..1393e57d50fb 100644
--- a/include/asm-sparc64/pci.h
+++ b/include/asm-sparc64/pci.h
@@ -3,8 +3,7 @@
 
 #ifdef __KERNEL__
 
-#include <linux/fs.h>
-#include <linux/mm.h>
+#include <linux/dma-mapping.h>
 
 /* Can be used to override the logic in pci_scan_bus for skipping
  * already-configured bus numbers - to be used for buggy BIOSes
@@ -30,80 +29,42 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
 	/* We don't do dynamic PCI IRQ allocation */
 }
 
-/* Dynamic DMA mapping stuff.
- */
-
 /* The PCI address space does not equal the physical memory
  * address space. The networking and block device layers use
  * this boolean for bounce buffer decisions.
  */
 #define PCI_DMA_BUS_IS_PHYS	(0)
 
-#include <asm/scatterlist.h>
-
-struct pci_dev;
-
-struct pci_iommu_ops {
-	void *(*alloc_consistent)(struct pci_dev *, size_t, dma_addr_t *, gfp_t);
-	void (*free_consistent)(struct pci_dev *, size_t, void *, dma_addr_t);
-	dma_addr_t (*map_single)(struct pci_dev *, void *, size_t, int);
-	void (*unmap_single)(struct pci_dev *, dma_addr_t, size_t, int);
-	int (*map_sg)(struct pci_dev *, struct scatterlist *, int, int);
-	void (*unmap_sg)(struct pci_dev *, struct scatterlist *, int, int);
-	void (*dma_sync_single_for_cpu)(struct pci_dev *, dma_addr_t, size_t, int);
-	void (*dma_sync_sg_for_cpu)(struct pci_dev *, struct scatterlist *, int, int);
-};
-
-extern const struct pci_iommu_ops *pci_iommu_ops;
-
-/* Allocate and map kernel buffer using consistent mode DMA for a device.
- * hwdev should be valid struct pci_dev pointer for PCI devices.
- */
-static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
+static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
+					 dma_addr_t *dma_handle)
 {
-	return pci_iommu_ops->alloc_consistent(hwdev, size, dma_handle, GFP_ATOMIC);
+	return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC);
 }
 
-/* Free and unmap a consistent DMA buffer.
- * cpu_addr is what was returned from pci_alloc_consistent,
- * size must be the same as what as passed into pci_alloc_consistent,
- * and likewise dma_addr must be the same as what *dma_addrp was set to.
- *
- * References to the memory and mappings associated with cpu_addr/dma_addr
- * past this call are illegal.
- */
-static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static inline void pci_free_consistent(struct pci_dev *pdev, size_t size,
+				       void *vaddr, dma_addr_t dma_handle)
 {
-	return pci_iommu_ops->free_consistent(hwdev, size, vaddr, dma_handle);
+	return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle);
 }
 
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
- */
-static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
+static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
+					size_t size, int direction)
 {
-	return pci_iommu_ops->map_single(hwdev, ptr, size, direction);
+	return dma_map_single(&pdev->dev, ptr, size,
+			      (enum dma_data_direction) direction);
 }
 
-/* Unmap a single streaming mode DMA translation. The dma_addr and size
- * must match what was provided for in a previous pci_map_single call. All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
+static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
+				    size_t size, int direction)
 {
-	pci_iommu_ops->unmap_single(hwdev, dma_addr, size, direction);
+	dma_unmap_single(&pdev->dev, dma_addr, size,
+			 (enum dma_data_direction) direction);
 }
 
-/* No highmem on sparc64, plus we have an IOMMU, so mapping pages is easy. */
 #define pci_map_page(dev, page, off, size, dir) \
 	pci_map_single(dev, (page_address(page) + (off)), size, dir)
-#define pci_unmap_page(dev,addr,sz,dir) pci_unmap_single(dev,addr,sz,dir)
+#define pci_unmap_page(dev,addr,sz,dir) \
+	pci_unmap_single(dev,addr,sz,dir)
 
 /* pci_unmap_{single,page} is not a nop, thus... */
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
@@ -119,75 +80,48 @@ static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
 #define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
 	(((PTR)->LEN_NAME) = (VAL))
 
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scatter-gather version of the
- * above pci_map_single interface. Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
+			     int nents, int direction)
 {
-	return pci_iommu_ops->map_sg(hwdev, sg, nents, direction);
+	return dma_map_sg(&pdev->dev, sg, nents,
+			  (enum dma_data_direction) direction);
 }
 
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction)
+static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg,
+				int nents, int direction)
 {
-	pci_iommu_ops->unmap_sg(hwdev, sg, nhwents, direction);
+	dma_unmap_sg(&pdev->dev, sg, nents,
+		     (enum dma_data_direction) direction);
 }
 
-/* Make physical memory consistent for a single
- * streaming mode DMA translation after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so. At the
- * next point you give the PCI dma address back to the card, you
- * must first perform a pci_dma_sync_for_device, and then the
- * device again owns the buffer.
- */
-static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
+static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev,
+					       dma_addr_t dma_handle,
+					       size_t size, int direction)
 {
-	pci_iommu_ops->dma_sync_single_for_cpu(hwdev, dma_handle, size, direction);
+	dma_sync_single_for_cpu(&pdev->dev, dma_handle, size,
+				(enum dma_data_direction) direction);
 }
 
-static inline void
-pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
-			       size_t size, int direction)
+static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev,
+						  dma_addr_t dma_handle,
+						  size_t size, int direction)
 {
 	/* No flushing needed to sync cpu writes to the device. */
-	BUG_ON(direction == PCI_DMA_NONE);
 }
 
-/* Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as pci_dma_sync_single_* but for a scatter-gather list,
- * same rules and usage.
- */
-static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
+static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev,
+					   struct scatterlist *sg,
+					   int nents, int direction)
 {
-	pci_iommu_ops->dma_sync_sg_for_cpu(hwdev, sg, nelems, direction);
+	dma_sync_sg_for_cpu(&pdev->dev, sg, nents,
+			    (enum dma_data_direction) direction);
 }
 
-static inline void
-pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
-			   int nelems, int direction)
+static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev,
+					      struct scatterlist *sg,
+					      int nelems, int direction)
 {
 	/* No flushing needed to sync cpu writes to the device. */
-	BUG_ON(direction == PCI_DMA_NONE);
 }
 
 /* Return whether the given PCI device DMA address mask can
@@ -206,11 +140,9 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
 #define PCI64_REQUIRED_MASK	(~(dma64_addr_t)0)
 #define PCI64_ADDR_BASE		0xfffc000000000000UL
 
-#define PCI_DMA_ERROR_CODE	(~(dma_addr_t)0x0)
-
 static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
 {
-	return (dma_addr == PCI_DMA_ERROR_CODE);
+	return dma_mapping_error(dma_addr);
 }
 
 #ifdef CONFIG_PCI
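Net effect of the pci.h rewrite: the pci_* helpers are now pure shims over
the generic calls, so the two APIs can be mixed freely. A hypothetical
demonstration (example_map is not from the patch):

#include <linux/pci.h>

static dma_addr_t example_map(struct pci_dev *pdev, void *buf, size_t len)
{
	/* Both of these now reach the same dma_ops->map_single hook: */
	dma_addr_t a = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

	if (!pci_dma_mapping_error(a))
		pci_unmap_single(pdev, a, len, PCI_DMA_TODEVICE);
	return dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
}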
diff --git a/include/asm-sparc64/sbus.h b/include/asm-sparc64/sbus.h
index 7efd49d31bb8..0151cad486f3 100644
--- a/include/asm-sparc64/sbus.h
+++ b/include/asm-sparc64/sbus.h
@@ -1,7 +1,6 @@
-/* $Id: sbus.h,v 1.14 2000/02/18 13:50:55 davem Exp $
- * sbus.h: Defines for the Sun SBus.
+/* sbus.h: Defines for the Sun SBus.
  *
- * Copyright (C) 1996, 1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1996, 1999, 2007 David S. Miller (davem@davemloft.net)
  */
 
 #ifndef _SPARC64_SBUS_H
@@ -69,7 +68,6 @@ struct sbus_dev {
 /* This struct describes the SBus(s) found on this machine. */
 struct sbus_bus {
 	struct of_device ofdev;
-	void *iommu;	/* Opaque IOMMU cookie */
 	struct sbus_dev *devices;	/* Tree of SBUS devices */
 	struct sbus_bus *next;		/* Next SBUS in system */
 	int prom_node;			/* OBP node of SBUS */
@@ -102,9 +100,18 @@ extern struct sbus_bus *sbus_root;
 extern void sbus_set_sbus64(struct sbus_dev *, int);
 extern void sbus_fill_device_irq(struct sbus_dev *);
 
-/* These yield IOMMU mappings in consistent mode. */
-extern void *sbus_alloc_consistent(struct sbus_dev *, size_t, dma_addr_t *dma_addrp);
-extern void sbus_free_consistent(struct sbus_dev *, size_t, void *, dma_addr_t);
+static inline void *sbus_alloc_consistent(struct sbus_dev *sdev , size_t size,
+					  dma_addr_t *dma_handle)
+{
+	return dma_alloc_coherent(&sdev->ofdev.dev, size,
+				  dma_handle, GFP_ATOMIC);
+}
+
+static inline void sbus_free_consistent(struct sbus_dev *sdev, size_t size,
+					void *vaddr, dma_addr_t dma_handle)
+{
+	return dma_free_coherent(&sdev->ofdev.dev, size, vaddr, dma_handle);
+}
 
 #define SBUS_DMA_BIDIRECTIONAL	DMA_BIDIRECTIONAL
 #define SBUS_DMA_TODEVICE	DMA_TO_DEVICE
@@ -112,18 +119,67 @@ extern void sbus_free_consistent(struct sbus_dev *, size_t, void *, dma_addr_t);
 #define SBUS_DMA_NONE		DMA_NONE
 
 /* All the rest use streaming mode mappings. */
-extern dma_addr_t sbus_map_single(struct sbus_dev *, void *, size_t, int);
-extern void sbus_unmap_single(struct sbus_dev *, dma_addr_t, size_t, int);
-extern int sbus_map_sg(struct sbus_dev *, struct scatterlist *, int, int);
-extern void sbus_unmap_sg(struct sbus_dev *, struct scatterlist *, int, int);
+static inline dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr,
+					 size_t size, int direction)
+{
+	return dma_map_single(&sdev->ofdev.dev, ptr, size,
+			      (enum dma_data_direction) direction);
+}
+
+static inline void sbus_unmap_single(struct sbus_dev *sdev,
+				     dma_addr_t dma_addr, size_t size,
+				     int direction)
+{
+	dma_unmap_single(&sdev->ofdev.dev, dma_addr, size,
+			 (enum dma_data_direction) direction);
+}
+
+static inline int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg,
+			      int nents, int direction)
+{
+	return dma_map_sg(&sdev->ofdev.dev, sg, nents,
+			  (enum dma_data_direction) direction);
+}
+
+static inline void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg,
+				 int nents, int direction)
+{
+	dma_unmap_sg(&sdev->ofdev.dev, sg, nents,
+		     (enum dma_data_direction) direction);
+}
 
 /* Finally, allow explicit synchronization of streamable mappings. */
-extern void sbus_dma_sync_single_for_cpu(struct sbus_dev *, dma_addr_t, size_t, int);
+static inline void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev,
+						dma_addr_t dma_handle,
+						size_t size, int direction)
+{
+	dma_sync_single_for_cpu(&sdev->ofdev.dev, dma_handle, size,
+				(enum dma_data_direction) direction);
+}
 #define sbus_dma_sync_single sbus_dma_sync_single_for_cpu
-extern void sbus_dma_sync_single_for_device(struct sbus_dev *, dma_addr_t, size_t, int);
-extern void sbus_dma_sync_sg_for_cpu(struct sbus_dev *, struct scatterlist *, int, int);
+
+static inline void sbus_dma_sync_single_for_device(struct sbus_dev *sdev,
+						   dma_addr_t dma_handle,
+						   size_t size, int direction)
+{
+	/* No flushing needed to sync cpu writes to the device. */
+}
+
+static inline void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev,
+					    struct scatterlist *sg,
+					    int nents, int direction)
+{
+	dma_sync_sg_for_cpu(&sdev->ofdev.dev, sg, nents,
+			    (enum dma_data_direction) direction);
+}
 #define sbus_dma_sync_sg sbus_dma_sync_sg_for_cpu
-extern void sbus_dma_sync_sg_for_device(struct sbus_dev *, struct scatterlist *, int, int);
+
+static inline void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev,
+					       struct scatterlist *sg,
+					       int nents, int direction)
+{
+	/* No flushing needed to sync cpu writes to the device. */
+}
 
 extern void sbus_arch_bus_ranges_init(struct device_node *, struct sbus_bus *);
 extern void sbus_setup_iommu(struct sbus_bus *, struct device_node *);