path: root/arch
author	FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2010-03-10 18:23:37 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-12 18:52:42 -0500
commit	c186caca3dbe7f44da624cb4f9d78e1b1dfb13b8 (patch)
tree	dcd42e55362b9b2d882a16822524c14971f812a3 /arch
parent	349004294c365cb99a0ee84149286d6f29b1e6b9 (diff)
dma-mapping: alpha: use include/linux/pci-dma-compat.h
This converts Alpha to use include/linux/pci-dma-compat.h. Alpha is the
only architecture that implements the PCI DMA API in its own way, which
makes it difficult to implement the generic DMA API via the PCI bus
specific DMA API. The generic DMA API calls the PCI DMA API
implementation in arch/alpha/kernel/pci_iommu.c on non-Jensen systems,
and the DMA API in arch/alpha/kernel/pci-noop.c on Jensen systems.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Greg KH <greg@kroah.com>
Cc: Kay Sievers <kay.sievers@vrfy.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
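For context, after this conversion the old pci_* entry points survive only
as thin wrappers. A minimal sketch of the resulting call path, simplified
from include/asm-generic/pci-dma-compat.h and
include/asm-generic/dma-mapping-common.h of this era (debug and attribute
plumbing omitted, so details may differ from the exact headers):

	/* pci-dma-compat.h: the pci_ API forwards to the generic dma_ API */
	static inline dma_addr_t
	pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
	{
		return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev,
				      ptr, size, (enum dma_data_direction)direction);
	}

	/* dma-mapping-common.h: the generic API dispatches through the
	   architecture's dma_map_ops -- on Alpha, alpha_pci_ops or
	   alpha_noop_ops as set up by this patch */
	static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
						size_t size,
						enum dma_data_direction dir)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);

		return ops->map_page(dev, virt_to_page(ptr),
				     (unsigned long)ptr & ~PAGE_MASK, size,
				     dir, NULL);
	}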
Diffstat (limited to 'arch')
-rw-r--r--	arch/alpha/Kconfig                    |   1
-rw-r--r--	arch/alpha/include/asm/dma-mapping.h  |  79
-rw-r--r--	arch/alpha/include/asm/pci.h          | 125
-rw-r--r--	arch/alpha/kernel/pci-noop.c          | 101
-rw-r--r--	arch/alpha/kernel/pci_iommu.c         | 201
5 files changed, 165 insertions(+), 342 deletions(-)
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 759f49d1248d..75291fdd379f 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -10,6 +10,7 @@ config ALPHA
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS
 	select HAVE_PERF_EVENTS
+	select HAVE_DMA_ATTRS
 	help
 	  The Alpha is a 64-bit general-purpose processor designed and
 	  marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index f514edec9f57..1bce8169733c 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -1,70 +1,49 @@
 #ifndef _ALPHA_DMA_MAPPING_H
 #define _ALPHA_DMA_MAPPING_H
 
+#include <linux/dma-attrs.h>
 
-#ifdef CONFIG_PCI
-
-#include <linux/pci.h>
+extern struct dma_map_ops *dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	return dma_ops;
+}
 
-#define dma_map_single(dev, va, size, dir)		\
-		pci_map_single(alpha_gendev_to_pci(dev), va, size, dir)
-#define dma_unmap_single(dev, addr, size, dir)		\
-		pci_unmap_single(alpha_gendev_to_pci(dev), addr, size, dir)
-#define dma_alloc_coherent(dev, size, addr, gfp)	\
-		__pci_alloc_consistent(alpha_gendev_to_pci(dev), size, addr, gfp)
-#define dma_free_coherent(dev, size, va, addr)		\
-		pci_free_consistent(alpha_gendev_to_pci(dev), size, va, addr)
-#define dma_map_page(dev, page, off, size, dir)		\
-		pci_map_page(alpha_gendev_to_pci(dev), page, off, size, dir)
-#define dma_unmap_page(dev, addr, size, dir)		\
-		pci_unmap_page(alpha_gendev_to_pci(dev), addr, size, dir)
-#define dma_map_sg(dev, sg, nents, dir)			\
-		pci_map_sg(alpha_gendev_to_pci(dev), sg, nents, dir)
-#define dma_unmap_sg(dev, sg, nents, dir)		\
-		pci_unmap_sg(alpha_gendev_to_pci(dev), sg, nents, dir)
-#define dma_supported(dev, mask)			\
-		pci_dma_supported(alpha_gendev_to_pci(dev), mask)
-#define dma_mapping_error(dev, addr)			\
-		pci_dma_mapping_error(alpha_gendev_to_pci(dev), addr)
+#include <asm-generic/dma-mapping-common.h>
 
-#else	/* no PCI - no IOMMU. */
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t gfp)
+{
+	return get_dma_ops(dev)->alloc_coherent(dev, size, dma_handle, gfp);
+}
 
-#include <asm/io.h>	/* for virt_to_phys() */
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *vaddr, dma_addr_t dma_handle)
+{
+	get_dma_ops(dev)->free_coherent(dev, size, vaddr, dma_handle);
+}
 
-struct scatterlist;
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t gfp);
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	       enum dma_data_direction direction);
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return get_dma_ops(dev)->mapping_error(dev, dma_addr);
+}
 
-#define dma_free_coherent(dev, size, va, addr)		\
-		free_pages((unsigned long)va, get_order(size))
-#define dma_supported(dev, mask)		(mask < 0x00ffffffUL ? 0 : 1)
-#define dma_map_single(dev, va, size, dir)	virt_to_phys(va)
-#define dma_map_page(dev, page, off, size, dir)	(page_to_pa(page) + off)
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	return get_dma_ops(dev)->dma_supported(dev, mask);
+}
 
-#define dma_unmap_single(dev, addr, size, dir)	((void)0)
-#define dma_unmap_page(dev, addr, size, dir)	((void)0)
-#define dma_unmap_sg(dev, sg, nents, dir)	((void)0)
-
-#define dma_mapping_error(dev, addr)	(0)
-
-#endif	/* !CONFIG_PCI */
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+	return get_dma_ops(dev)->set_dma_mask(dev, mask);
+}
 
 #define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)
 #define dma_is_consistent(d, h)			(1)
 
-int dma_set_mask(struct device *dev, u64 mask);
-
-#define dma_sync_single_for_cpu(dev, addr, size, dir)		((void)0)
-#define dma_sync_single_for_device(dev, addr, size, dir)	((void)0)
-#define dma_sync_sg_for_cpu(dev, sg, nents, dir)		((void)0)
-#define dma_sync_sg_for_device(dev, sg, nents, dir)		((void)0)
 #define dma_cache_sync(dev, va, size, dir)			((void)0)
-#define dma_sync_single_range_for_cpu(dev, addr, offset, size, dir)	((void)0)
-#define dma_sync_single_range_for_device(dev, addr, offset, size, dir)	((void)0)
-
 #define dma_get_cache_alignment()			L1_CACHE_BYTES
 
 #endif	/* _ALPHA_DMA_MAPPING_H */
diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h
index e1846ba6aaba..28d0497fd3c7 100644
--- a/arch/alpha/include/asm/pci.h
+++ b/arch/alpha/include/asm/pci.h
@@ -70,128 +70,11 @@ extern inline void pcibios_penalize_isa_irq(int irq, int active)
    decisions.  */
 #define PCI_DMA_BUS_IS_PHYS  0
 
-/* Allocate and map kernel buffer using consistent mode DMA for PCI
-   device.  Returns non-NULL cpu-view pointer to the buffer if
-   successful and sets *DMA_ADDRP to the pci side dma address as well,
-   else DMA_ADDRP is undefined.  */
-
-extern void *__pci_alloc_consistent(struct pci_dev *, size_t,
-				    dma_addr_t *, gfp_t);
-static inline void *
-pci_alloc_consistent(struct pci_dev *dev, size_t size, dma_addr_t *dma)
-{
-	return __pci_alloc_consistent(dev, size, dma, GFP_ATOMIC);
-}
-
-/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
-   be values that were returned from pci_alloc_consistent.  SIZE must
-   be the same as what as passed into pci_alloc_consistent.
-   References to the memory and mappings associated with CPU_ADDR or
-   DMA_ADDR past this call are illegal.  */
-
-extern void pci_free_consistent(struct pci_dev *, size_t, void *, dma_addr_t);
-
-/* Map a single buffer of the indicate size for PCI DMA in streaming mode.
-   The 32-bit PCI bus mastering address to use is returned.  Once the device
-   is given the dma address, the device owns this memory until either
-   pci_unmap_single or pci_dma_sync_single_for_cpu is performed.  */
-
-extern dma_addr_t pci_map_single(struct pci_dev *, void *, size_t, int);
-
-/* Likewise, but for a page instead of an address.  */
-extern dma_addr_t pci_map_page(struct pci_dev *, struct page *,
-			       unsigned long, size_t, int);
-
-/* Test for pci_map_single or pci_map_page having generated an error.  */
-
-static inline int
-pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
-{
-	return dma_addr == 0;
-}
-
-/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
-   SIZE must match what was provided for in a previous pci_map_single
-   call.  All other usages are undefined.  After this call, reads by
-   the cpu to the buffer are guaranteed to see whatever the device
-   wrote there.  */
-
-extern void pci_unmap_single(struct pci_dev *, dma_addr_t, size_t, int);
-extern void pci_unmap_page(struct pci_dev *, dma_addr_t, size_t, int);
-
-/* Map a set of buffers described by scatterlist in streaming mode for
-   PCI DMA.  This is the scatter-gather version of the above
-   pci_map_single interface.  Here the scatter gather list elements
-   are each tagged with the appropriate PCI dma address and length.
-   They are obtained via sg_dma_{address,length}(SG).
-
-   NOTE: An implementation may be able to use a smaller number of DMA
-   address/length pairs than there are SG table elements.  (for
-   example via virtual mapping capabilities) The routine returns the
-   number of addr/length pairs actually used, at most nents.
-
-   Device ownership issues as mentioned above for pci_map_single are
-   the same here.  */
-
-extern int pci_map_sg(struct pci_dev *, struct scatterlist *, int, int);
-
-/* Unmap a set of streaming mode DMA translations.  Again, cpu read
-   rules concerning calls here are the same as for pci_unmap_single()
-   above.  */
-
-extern void pci_unmap_sg(struct pci_dev *, struct scatterlist *, int, int);
-
-/* Make physical memory consistent for a single streaming mode DMA
-   translation after a transfer and device currently has ownership
-   of the buffer.
-
-   If you perform a pci_map_single() but wish to interrogate the
-   buffer using the cpu, yet do not wish to teardown the PCI dma
-   mapping, you must call this function before doing so.  At the next
-   point you give the PCI dma address back to the card, you must first
-   perform a pci_dma_sync_for_device, and then the device again owns
-   the buffer.  */
-
-static inline void
-pci_dma_sync_single_for_cpu(struct pci_dev *dev, dma_addr_t dma_addr,
-			    long size, int direction)
-{
-	/* Nothing to do.  */
-}
-
-static inline void
-pci_dma_sync_single_for_device(struct pci_dev *dev, dma_addr_t dma_addr,
-			       size_t size, int direction)
-{
-	/* Nothing to do.  */
-}
-
-/* Make physical memory consistent for a set of streaming mode DMA
-   translations after a transfer.  The same as pci_dma_sync_single_*
-   but for a scatter-gather list, same rules and usage.  */
-
-static inline void
-pci_dma_sync_sg_for_cpu(struct pci_dev *dev, struct scatterlist *sg,
-			int nents, int direction)
-{
-	/* Nothing to do.  */
-}
-
-static inline void
-pci_dma_sync_sg_for_device(struct pci_dev *dev, struct scatterlist *sg,
-			   int nents, int direction)
-{
-	/* Nothing to do.  */
-}
-
-/* Return whether the given PCI device DMA address mask can
-   be supported properly.  For example, if your device can
-   only drive the low 24-bits during PCI bus mastering, then
-   you would pass 0x00ffffff as the mask to this function.  */
-
-extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
-
-#ifdef CONFIG_PCI
+#ifdef CONFIG_PCI
+
+/* implement the pci_ DMA API in terms of the generic device dma_ one */
+#include <asm-generic/pci-dma-compat.h>
+
 static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 					enum pci_dma_burst_strategy *strat,
 					unsigned long *strategy_parameter)
@@ -230,8 +113,6 @@ static inline int pci_proc_domain(struct pci_bus *bus)
 	return hose->need_domain_info;
 }
 
-struct pci_dev *alpha_gendev_to_pci(struct device *dev);
-
 #endif /* __KERNEL__ */
 
 /* Values for the `which' argument to sys_pciconfig_iobase.  */
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
index c19a376520f4..823a540f9f5b 100644
--- a/arch/alpha/kernel/pci-noop.c
+++ b/arch/alpha/kernel/pci-noop.c
@@ -106,58 +106,8 @@ sys_pciconfig_write(unsigned long bus, unsigned long dfn,
 	return -ENODEV;
 }
 
-/* Stubs for the routines in pci_iommu.c: */
-
-void *
-__pci_alloc_consistent(struct pci_dev *pdev, size_t size,
-		       dma_addr_t *dma_addrp, gfp_t gfp)
-{
-	return NULL;
-}
-
-void
-pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
-		    dma_addr_t dma_addr)
-{
-}
-
-dma_addr_t
-pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size,
-	       int direction)
-{
-	return (dma_addr_t) 0;
-}
-
-void
-pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
-		 int direction)
-{
-}
-
-int
-pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
-	   int direction)
-{
-	return 0;
-}
-
-void
-pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
-	     int direction)
-{
-}
-
-int
-pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
-{
-	return 0;
-}
-
-/* Generic DMA mapping functions: */
-
-void *
-dma_alloc_coherent(struct device *dev, size_t size,
-		   dma_addr_t *dma_handle, gfp_t gfp)
+static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t gfp)
 {
 	void *ret;
 
@@ -171,11 +121,22 @@ dma_alloc_coherent(struct device *dev, size_t size,
 	return ret;
 }
 
-EXPORT_SYMBOL(dma_alloc_coherent);
+static void alpha_noop_free_coherent(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_addr)
+{
+	free_pages((unsigned long)cpu_addr, get_order(size));
+}
+
+static dma_addr_t alpha_noop_map_page(struct device *dev, struct page *page,
+				      unsigned long offset, size_t size,
+				      enum dma_data_direction dir,
+				      struct dma_attrs *attrs)
+{
+	return page_to_pa(page) + offset;
+}
 
-int
-dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
-	   enum dma_data_direction direction)
+static int alpha_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+			     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	int i;
 	struct scatterlist *sg;
@@ -192,19 +153,37 @@ dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 	return nents;
 }
 
-EXPORT_SYMBOL(dma_map_sg);
+static int alpha_noop_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+static int alpha_noop_supported(struct device *dev, u64 mask)
+{
+	return mask < 0x00ffffffUL ? 0 : 1;
+}
 
-int
-dma_set_mask(struct device *dev, u64 mask)
+static int alpha_noop_set_mask(struct device *dev, u64 mask)
 {
 	if (!dev->dma_mask || !dma_supported(dev, mask))
 		return -EIO;
 
 	*dev->dma_mask = mask;
-
 	return 0;
 }
-EXPORT_SYMBOL(dma_set_mask);
+
+struct dma_map_ops alpha_noop_ops = {
+	.alloc_coherent		= alpha_noop_alloc_coherent,
+	.free_coherent		= alpha_noop_free_coherent,
+	.map_page		= alpha_noop_map_page,
+	.map_sg			= alpha_noop_map_sg,
+	.mapping_error		= alpha_noop_mapping_error,
+	.dma_supported		= alpha_noop_supported,
+	.set_dma_mask		= alpha_noop_set_mask,
+};
+
+struct dma_map_ops *dma_ops = &alpha_noop_ops;
+EXPORT_SYMBOL(dma_ops);
 
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 8449504f5e0b..ce9e54c887fa 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -216,10 +216,30 @@ iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
 	for (i = 0; i < n; ++i)
 		p[i] = 0;
 }
 
-/* True if the machine supports DAC addressing, and DEV can
-   make use of it given MASK.  */
-static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);
+/*
+ * True if the machine supports DAC addressing, and DEV can
+ * make use of it given MASK.
+ */
+static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
+{
+	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
+	int ok = 1;
+
+	/* If this is not set, the machine doesn't support DAC at all.  */
+	if (dac_offset == 0)
+		ok = 0;
+
+	/* The device has to be able to address our DAC bit.  */
+	if ((dac_offset & dev->dma_mask) != dac_offset)
+		ok = 0;
+
+	/* If both conditions above are met, we are fine.  */
+	DBGA("pci_dac_dma_supported %s from %p\n",
+	     ok ? "yes" : "no", __builtin_return_address(0));
+
+	return ok;
+}
 
 /* Map a single buffer of the indicated size for PCI DMA in streaming
    mode.  The 32-bit PCI bus mastering address to use is returned.
@@ -301,23 +321,36 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	return ret;
 }
 
-dma_addr_t
-pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
+/* Helper for generic DMA-mapping functions. */
+static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
 {
-	int dac_allowed;
+	if (dev && dev->bus == &pci_bus_type)
+		return to_pci_dev(dev);
 
-	if (dir == PCI_DMA_NONE)
-		BUG();
+	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
+	   BUG() otherwise. */
+	BUG_ON(!isa_bridge);
 
-	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
-	return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
+	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
+	   bridge is bus master then). */
+	if (!dev || !dev->dma_mask || !*dev->dma_mask)
+		return isa_bridge;
+
+	/* For EISA bus masters, return isa_bridge (it might have smaller
+	   dma_mask due to wiring limitations). */
+	if (*dev->dma_mask >= isa_bridge->dma_mask)
+		return isa_bridge;
+
+	/* This assumes ISA bus master with dma_mask 0xffffff. */
+	return NULL;
 }
-EXPORT_SYMBOL(pci_map_single);
 
-dma_addr_t
-pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
-	     size_t size, int dir)
+static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
+				     unsigned long offset, size_t size,
+				     enum dma_data_direction dir,
+				     struct dma_attrs *attrs)
 {
+	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	int dac_allowed;
 
 	if (dir == PCI_DMA_NONE)
@@ -327,7 +360,6 @@ pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
 	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
 				size, dac_allowed);
 }
-EXPORT_SYMBOL(pci_map_page);
 
 /* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
    SIZE must match what was provided for in a previous pci_map_single
@@ -335,16 +367,17 @@ EXPORT_SYMBOL(pci_map_page);
    the cpu to the buffer are guaranteed to see whatever the device
    wrote there.  */
 
-void
-pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
-		 int direction)
+static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
+				 size_t size, enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
 {
 	unsigned long flags;
+	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
 	struct pci_iommu_arena *arena;
 	long dma_ofs, npages;
 
-	if (direction == PCI_DMA_NONE)
+	if (dir == PCI_DMA_NONE)
 		BUG();
 
 	if (dma_addr >= __direct_map_base
@@ -393,25 +426,16 @@ pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
 	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %p\n",
 	      dma_addr, size, npages, __builtin_return_address(0));
 }
-EXPORT_SYMBOL(pci_unmap_single);
-
-void
-pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
-	       size_t size, int direction)
-{
-	pci_unmap_single(pdev, dma_addr, size, direction);
-}
-EXPORT_SYMBOL(pci_unmap_page);
 
 /* Allocate and map kernel buffer using consistent mode DMA for PCI
    device.  Returns non-NULL cpu-view pointer to the buffer if
    successful and sets *DMA_ADDRP to the pci side dma address as well,
    else DMA_ADDRP is undefined.  */
 
-void *
-__pci_alloc_consistent(struct pci_dev *pdev, size_t size,
-		       dma_addr_t *dma_addrp, gfp_t gfp)
+static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
+				      dma_addr_t *dma_addrp, gfp_t gfp)
 {
+	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	void *cpu_addr;
 	long order = get_order(size);
 
@@ -439,13 +463,12 @@ try_again:
 		gfp |= GFP_DMA;
 		goto try_again;
 	}
 
 	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %p\n",
 	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));
 
 	return cpu_addr;
 }
-EXPORT_SYMBOL(__pci_alloc_consistent);
 
 /* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
    be values that were returned from pci_alloc_consistent.  SIZE must
@@ -453,17 +476,16 @@ EXPORT_SYMBOL(__pci_alloc_consistent);
    References to the memory and mappings associated with CPU_ADDR or
    DMA_ADDR past this call are illegal.  */
 
-void
-pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
-		    dma_addr_t dma_addr)
+static void alpha_pci_free_coherent(struct device *dev, size_t size,
+				    void *cpu_addr, dma_addr_t dma_addr)
 {
+	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
 	free_pages((unsigned long)cpu_addr, get_order(size));
 
 	DBGA2("pci_free_consistent: [%llx,%zx] from %p\n",
 	      dma_addr, size, __builtin_return_address(0));
 }
-EXPORT_SYMBOL(pci_free_consistent);
 
 /* Classify the elements of the scatterlist.  Write dma_address
    of each element with:
@@ -626,23 +648,21 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
 	return 1;
 }
 
-int
-pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
-	   int direction)
+static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
+			    int nents, enum dma_data_direction dir,
+			    struct dma_attrs *attrs)
 {
+	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	struct scatterlist *start, *end, *out;
 	struct pci_controller *hose;
 	struct pci_iommu_arena *arena;
 	dma_addr_t max_dma;
 	int dac_allowed;
-	struct device *dev;
 
-	if (direction == PCI_DMA_NONE)
+	if (dir == PCI_DMA_NONE)
 		BUG();
 
-	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
-
-	dev = pdev ? &pdev->dev : NULL;
+	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
 
 	/* Fast path single entry scatterlists.  */
 	if (nents == 1) {
@@ -699,19 +719,19 @@ pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
 	/* Some allocation failed while mapping the scatterlist
 	   entries.  Unmap them now.  */
 	if (out > start)
-		pci_unmap_sg(pdev, start, out - start, direction);
+		pci_unmap_sg(pdev, start, out - start, dir);
 	return 0;
 }
-EXPORT_SYMBOL(pci_map_sg);
 
 /* Unmap a set of streaming mode DMA translations.  Again, cpu read
    rules concerning calls here are the same as for pci_unmap_single()
    above.  */
 
-void
-pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
-	     int direction)
+static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
+			       int nents, enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
 {
+	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	unsigned long flags;
 	struct pci_controller *hose;
 	struct pci_iommu_arena *arena;
@@ -719,7 +739,7 @@ pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
 	dma_addr_t max_dma;
 	dma_addr_t fbeg, fend;
 
-	if (direction == PCI_DMA_NONE)
+	if (dir == PCI_DMA_NONE)
 		BUG();
 
 	if (! alpha_mv.mv_pci_tbi)
@@ -783,15 +803,13 @@ pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
 
 	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
 }
-EXPORT_SYMBOL(pci_unmap_sg);
-
 
 /* Return whether the given PCI device DMA address mask can be
    supported properly.  */
 
-int
-pci_dma_supported(struct pci_dev *pdev, u64 mask)
+static int alpha_pci_supported(struct device *dev, u64 mask)
 {
+	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	struct pci_controller *hose;
 	struct pci_iommu_arena *arena;
 
@@ -818,7 +836,6 @@ pci_dma_supported(struct pci_dev *pdev, u64 mask)
 
 	return 0;
 }
-EXPORT_SYMBOL(pci_dma_supported);
 
 
 /*
@@ -918,66 +935,32 @@ iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
 	return 0;
 }
 
-/* True if the machine supports DAC addressing, and DEV can
-   make use of it given MASK.  */
-
-static int
-pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
-{
-	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
-	int ok = 1;
-
-	/* If this is not set, the machine doesn't support DAC at all.  */
-	if (dac_offset == 0)
-		ok = 0;
-
-	/* The device has to be able to address our DAC bit.  */
-	if ((dac_offset & dev->dma_mask) != dac_offset)
-		ok = 0;
-
-	/* If both conditions above are met, we are fine.  */
-	DBGA("pci_dac_dma_supported %s from %p\n",
-	     ok ? "yes" : "no", __builtin_return_address(0));
-
-	return ok;
-}
-
-/* Helper for generic DMA-mapping functions. */
-
-struct pci_dev *
-alpha_gendev_to_pci(struct device *dev)
+static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	if (dev && dev->bus == &pci_bus_type)
-		return to_pci_dev(dev);
-
-	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
-	   BUG() otherwise. */
-	BUG_ON(!isa_bridge);
-
-	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
-	   bridge is bus master then). */
-	if (!dev || !dev->dma_mask || !*dev->dma_mask)
-		return isa_bridge;
-
-	/* For EISA bus masters, return isa_bridge (it might have smaller
-	   dma_mask due to wiring limitations). */
-	if (*dev->dma_mask >= isa_bridge->dma_mask)
-		return isa_bridge;
-
-	/* This assumes ISA bus master with dma_mask 0xffffff. */
-	return NULL;
+	return dma_addr == 0;
 }
-EXPORT_SYMBOL(alpha_gendev_to_pci);
 
-int
-dma_set_mask(struct device *dev, u64 mask)
+static int alpha_pci_set_mask(struct device *dev, u64 mask)
 {
 	if (!dev->dma_mask ||
 	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
 		return -EIO;
 
 	*dev->dma_mask = mask;
-
 	return 0;
 }
-EXPORT_SYMBOL(dma_set_mask);
+
+struct dma_map_ops alpha_pci_ops = {
+	.alloc_coherent		= alpha_pci_alloc_coherent,
+	.free_coherent		= alpha_pci_free_coherent,
+	.map_page		= alpha_pci_map_page,
+	.unmap_page		= alpha_pci_unmap_page,
+	.map_sg			= alpha_pci_map_sg,
+	.unmap_sg		= alpha_pci_unmap_sg,
+	.mapping_error		= alpha_pci_mapping_error,
+	.dma_supported		= alpha_pci_supported,
+	.set_dma_mask		= alpha_pci_set_mask,
+};
+
+struct dma_map_ops *dma_ops = &alpha_pci_ops;
+EXPORT_SYMBOL(dma_ops);
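For illustration only, a hypothetical driver fragment (not part of this
patch) showing the effect of the conversion: callers use only the generic
DMA API, and get_dma_ops() picks alpha_pci_ops on IOMMU systems or
alpha_noop_ops on Jensen. The function and variable names here are
made up for the example:

	/* dma_map_single() dispatches to alpha_pci_map_page() or
	   alpha_noop_map_page() via get_dma_ops(dev)->map_page() */
	static int example_map_buffer(struct device *dev, void *buf, size_t len,
				      dma_addr_t *handle)
	{
		*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *handle))
			return -EIO;
		return 0;
	}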