path: root/arch/powerpc
author:    Becky Bruce <becky.bruce@freescale.com>    2008-09-12 06:34:46 -0400
committer: Kumar Gala <galak@kernel.crashing.org>    2008-09-24 17:26:45 -0400
commit:    4fc665b88a79a45bae8bbf3a05563c27c7337c3d
tree:      ca668c2fab7c3a4d62b92174f4a5fcae2625cdd1
parent:    8fae0353247530d2124b2419052fa6120462fa99
powerpc: Merge 32 and 64-bit dma code
We essentially adopt the 64-bit dma code, with some changes to support
32-bit systems, including HIGHMEM. dma functions on 32-bit are now
invoked via accessor functions which call the correct op for a device
based on archdata dma_ops. If there is no archdata dma_ops, this
defaults to dma_direct_ops.

In addition, the dma_map/unmap_page functions are added to dma_ops
because we can't just fall back on map/unmap_single when HIGHMEM is
enabled. In the case of dma_direct_*, we stop using map/unmap_single
and just use the page version - this saves a lot of ugly ifdeffing.
We leave map/unmap_single in the dma_ops definition, though, because
they are needed by the iommu code, which does not implement
map/unmap_page. Ideally, going forward, we will completely eliminate
map/unmap_single and just have map/unmap_page, if it's workable for
64-bit.

Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
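The dispatch described above can be seen end to end: a streaming-DMA call
resolves through get_dma_ops(), which on 32-bit falls back to dma_direct_ops
when no bus code has filled in archdata. A minimal sketch of that flow
(illustrative only, not part of the patch; pdev, buf and len are hypothetical):

	struct dma_mapping_ops *ops = get_dma_ops(&pdev->dev);
	dma_addr_t handle;

	if (ops->map_single)	/* e.g. the iommu ops */
		handle = ops->map_single(&pdev->dev, buf, len,
					 DMA_TO_DEVICE, NULL);
	else			/* dma_direct_ops: page version only */
		handle = ops->map_page(&pdev->dev, virt_to_page(buf),
				       (unsigned long)buf % PAGE_SIZE, len,
				       DMA_TO_DEVICE, NULL);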
Diffstat (limited to 'arch/powerpc')
 arch/powerpc/include/asm/dma-mapping.h | 187
 arch/powerpc/include/asm/machdep.h     |   5
 arch/powerpc/include/asm/pci.h         |  14
 arch/powerpc/kernel/Makefile           |   4
 arch/powerpc/kernel/dma.c              |  69
 arch/powerpc/kernel/pci-common.c       |  48
 arch/powerpc/kernel/pci_32.c           |   7
 arch/powerpc/kernel/pci_64.c           |  46
 8 files changed, 175 insertions(+), 205 deletions(-)
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index c7ca45f97dd2..fddb229bd74f 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -44,8 +44,6 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
 
 #endif /* ! CONFIG_NOT_COHERENT_CACHE */
 
-#ifdef CONFIG_PPC64
-
 static inline unsigned long device_to_mask(struct device *dev)
 {
 	if (dev->dma_mask && *dev->dma_mask)
@@ -76,8 +74,24 @@ struct dma_mapping_ops {
 			struct dma_attrs *attrs);
 	int		(*dma_supported)(struct device *dev, u64 mask);
 	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
+	dma_addr_t	(*map_page)(struct device *dev, struct page *page,
+				unsigned long offset, size_t size,
+				enum dma_data_direction direction,
+				struct dma_attrs *attrs);
+	void		(*unmap_page)(struct device *dev,
+				dma_addr_t dma_address, size_t size,
+				enum dma_data_direction direction,
+				struct dma_attrs *attrs);
 };
 
+/*
+ * Available generic sets of operations
+ */
+#ifdef CONFIG_PPC64
+extern struct dma_mapping_ops dma_iommu_ops;
+#endif
+extern struct dma_mapping_ops dma_direct_ops;
+
 static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 {
 	/* We don't handle the NULL dev case for ISA for now. We could
@@ -85,8 +99,19 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 	 * only ISA DMA device we support is the floppy and we have a hack
 	 * in the floppy driver directly to get a device for us.
 	 */
-	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
+
+	if (unlikely(dev == NULL) || dev->archdata.dma_ops == NULL) {
+#ifdef CONFIG_PPC64
 		return NULL;
+#else
+		/* Use default on 32-bit if dma_ops is not set up */
+		/* TODO: Long term, we should fix drivers so that dev and
+		 * archdata dma_ops are set up for all buses.
+		 */
+		return &dma_direct_ops;
+#endif
+	}
+
 	return dev->archdata.dma_ops;
 }
 
@@ -123,6 +148,12 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 	return 0;
 }
 
+/*
+ * TODO: map_/unmap_single will ideally go away, to be completely
+ * replaced by map/unmap_page. Until then, we allow dma_ops to have
+ * one or the other, or both by checking to see if the specific
+ * function requested exists; and if not, falling back on the other set.
+ */
 static inline dma_addr_t dma_map_single_attrs(struct device *dev,
 					      void *cpu_addr,
 					      size_t size,
@@ -132,7 +163,14 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	return dma_ops->map_single(dev, cpu_addr, size, direction, attrs);
+
+	if (dma_ops->map_single)
+		return dma_ops->map_single(dev, cpu_addr, size, direction,
+					   attrs);
+
+	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
+			(unsigned long)cpu_addr % PAGE_SIZE, size,
+			direction, attrs);
 }
 
 static inline void dma_unmap_single_attrs(struct device *dev,
@@ -144,7 +182,13 @@ static inline void dma_unmap_single_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
+
+	if (dma_ops->unmap_single) {
+		dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
+		return;
+	}
+
+	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
 }
 
 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
@@ -156,8 +200,13 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
+
+	if (dma_ops->map_page)
+		return dma_ops->map_page(dev, page, offset, size, direction,
+					 attrs);
+
 	return dma_ops->map_single(dev, page_address(page) + offset, size,
 				   direction, attrs);
 }
 
 static inline void dma_unmap_page_attrs(struct device *dev,
@@ -169,6 +218,12 @@ static inline void dma_unmap_page_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
+
+	if (dma_ops->unmap_page) {
+		dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
+		return;
+	}
+
 	dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
 }
 
@@ -253,126 +308,6 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
 }
 
-/*
- * Available generic sets of operations
- */
-extern struct dma_mapping_ops dma_iommu_ops;
-extern struct dma_mapping_ops dma_direct_ops;
-
-#else /* CONFIG_PPC64 */
-
-#define dma_supported(dev, mask)	(1)
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t * dma_handle,
-				       gfp_t gfp)
-{
-#ifdef CONFIG_NOT_COHERENT_CACHE
-	return __dma_alloc_coherent(size, dma_handle, gfp);
-#else
-	void *ret;
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
-		gfp |= GFP_DMA;
-
-	ret = (void *)__get_free_pages(gfp, get_order(size));
-
-	if (ret != NULL) {
-		memset(ret, 0, size);
-		*dma_handle = virt_to_bus(ret);
-	}
-
-	return ret;
-#endif
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-		  dma_addr_t dma_handle)
-{
-#ifdef CONFIG_NOT_COHERENT_CACHE
-	__dma_free_coherent(size, vaddr);
-#else
-	free_pages((unsigned long)vaddr, get_order(size));
-#endif
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	__dma_sync(ptr, size, direction);
-
-	return virt_to_bus(ptr);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-				    size_t size,
-				    enum dma_data_direction direction)
-{
-	/* We do nothing. */
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	__dma_sync_page(page, offset, size, direction);
-
-	return page_to_bus(page) + offset;
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-				  size_t size,
-				  enum dma_data_direction direction)
-{
-	/* We do nothing. */
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
-	   enum dma_data_direction direction)
-{
-	struct scatterlist *sg;
-	int i;
-
-	BUG_ON(direction == DMA_NONE);
-
-	for_each_sg(sgl, sg, nents, i) {
-		BUG_ON(!sg_page(sg));
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
-		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
-	}
-
-	return nents;
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-				int nhwentries,
-				enum dma_data_direction direction)
-{
-	/* We don't do anything here. */
-}
-
-#endif /* CONFIG_PPC64 */
-
 static inline void dma_sync_single_for_cpu(struct device *dev,
 					   dma_addr_t dma_handle, size_t size,
 					   enum dma_data_direction direction)
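Note how the fallback in dma_map_page_attrs() covers the iommu case, which
(per the commit message) implements only map/unmap_single. A hedged sketch of
that path, with a hypothetical lowmem page pg (page_address() would be invalid
for a HIGHMEM page, which is exactly why dma_direct_ops grows a real map_page
op in this patch):

	struct dma_mapping_ops *ops = get_dma_ops(dev);	/* e.g. dma_iommu_ops */

	if (ops->map_page)	/* not set by the iommu code in this patch */
		return ops->map_page(dev, pg, 0, len, DMA_TO_DEVICE, NULL);

	/* fall back: lower the page to a kernel virtual address */
	return ops->map_single(dev, page_address(pg), len, DMA_TO_DEVICE, NULL);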
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 893aafd87fde..2740c44ff717 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -88,8 +88,6 @@ struct machdep_calls {
 	unsigned long	(*tce_get)(struct iommu_table *tbl,
 				    long index);
 	void		(*tce_flush)(struct iommu_table *tbl);
-	void		(*pci_dma_dev_setup)(struct pci_dev *dev);
-	void		(*pci_dma_bus_setup)(struct pci_bus *bus);
 
 	void __iomem *	(*ioremap)(phys_addr_t addr, unsigned long size,
 				   unsigned long flags);
@@ -101,6 +99,9 @@ struct machdep_calls {
 #endif
 #endif /* CONFIG_PPC64 */
 
+	void		(*pci_dma_dev_setup)(struct pci_dev *dev);
+	void		(*pci_dma_bus_setup)(struct pci_bus *bus);
+
 	int		(*probe)(void);
 	void		(*setup_arch)(void); /* Optional, may be NULL */
 	void		(*init_early)(void);
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index a05a942b1c25..0e52c7828ea4 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -60,6 +60,14 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 	return channel ? 15 : 14;
 }
 
+#ifdef CONFIG_PCI
+extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops);
+extern struct dma_mapping_ops *get_pci_dma_ops(void);
+#else /* CONFIG_PCI */
+#define set_pci_dma_ops(d)
+#define get_pci_dma_ops()	NULL
+#endif
+
 #ifdef CONFIG_PPC64
 
 /*
@@ -70,9 +78,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 #define PCI_DISABLE_MWI
 
 #ifdef CONFIG_PCI
-extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops);
-extern struct dma_mapping_ops *get_pci_dma_ops(void);
-
 static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 					enum pci_dma_burst_strategy *strat,
 					unsigned long *strategy_parameter)
@@ -89,9 +94,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 	*strat = PCI_DMA_BURST_MULTIPLE;
 	*strategy_parameter = cacheline_size;
 }
-#else /* CONFIG_PCI */
-#define set_pci_dma_ops(d)
-#define get_pci_dma_ops()	NULL
 #endif
 
 #else /* 32-bit */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 09b3cabf2f91..fdb58253fa5b 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -70,10 +70,10 @@ extra-$(CONFIG_8xx) := head_8xx.o
 extra-y				+= vmlinux.lds
 
 obj-y				+= time.o prom.o traps.o setup-common.o \
-				   udbg.o misc.o io.o \
+				   udbg.o misc.o io.o dma.o \
 				   misc_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC32)		+= entry_32.o setup_32.o
-obj-$(CONFIG_PPC64)		+= dma.o dma-iommu.o iommu.o
+obj-$(CONFIG_PPC64)		+= dma-iommu.o iommu.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
 obj-$(CONFIG_PPC_MULTIPLATFORM)	+= prom_init.o
 obj-$(CONFIG_MODULES)		+= ppc_ksyms.o
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 124f86743bde..41fdd48bf433 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -16,21 +16,30 @@
  * This implementation supports a per-device offset that can be applied if
  * the address at which memory is visible to devices is not 0. Platform code
  * can set archdata.dma_data to an unsigned long holding the offset. By
- * default the offset is zero.
+ * default the offset is PCI_DRAM_OFFSET.
  */
 
 static unsigned long get_dma_direct_offset(struct device *dev)
 {
-	return (unsigned long)dev->archdata.dma_data;
+	if (dev)
+		return (unsigned long)dev->archdata.dma_data;
+
+	return PCI_DRAM_OFFSET;
 }
 
-static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 				dma_addr_t *dma_handle, gfp_t flag)
 {
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	return __dma_alloc_coherent(size, dma_handle, flag);
+#else
 	struct page *page;
 	void *ret;
 	int node = dev_to_node(dev);
 
+	/* ignore region specifiers */
+	flag &= ~(__GFP_HIGHMEM);
+
 	page = alloc_pages_node(node, flag, get_order(size));
 	if (page == NULL)
 		return NULL;
@@ -39,27 +48,17 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);
 
 	return ret;
+#endif
 }
 
-static void dma_direct_free_coherent(struct device *dev, size_t size,
+void dma_direct_free_coherent(struct device *dev, size_t size,
 			      void *vaddr, dma_addr_t dma_handle)
 {
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	__dma_free_coherent(size, vaddr);
+#else
 	free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
-					size_t size,
-					enum dma_data_direction direction,
-					struct dma_attrs *attrs)
-{
-	return virt_to_abs(ptr) + get_dma_direct_offset(dev);
-}
-
-static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
-					size_t size,
-					enum dma_data_direction direction,
-					struct dma_attrs *attrs)
-{
+#endif
 }
 
 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
@@ -85,20 +84,44 @@ static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
 
 static int dma_direct_dma_supported(struct device *dev, u64 mask)
 {
+#ifdef CONFIG_PPC64
 	/* Could be improved to check for memory though it better be
 	 * done via some global so platforms can set the limit in case
 	 * they have limited DMA windows
 	 */
 	return mask >= DMA_32BIT_MASK;
+#else
+	return 1;
+#endif
+}
+
+static inline dma_addr_t dma_direct_map_page(struct device *dev,
+					     struct page *page,
+					     unsigned long offset,
+					     size_t size,
+					     enum dma_data_direction dir,
+					     struct dma_attrs *attrs)
+{
+	BUG_ON(dir == DMA_NONE);
+	__dma_sync_page(page, offset, size, dir);
+	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
+}
+
+static inline void dma_direct_unmap_page(struct device *dev,
+					 dma_addr_t dma_address,
+					 size_t size,
+					 enum dma_data_direction direction,
+					 struct dma_attrs *attrs)
+{
 }
 
 struct dma_mapping_ops dma_direct_ops = {
 	.alloc_coherent	= dma_direct_alloc_coherent,
 	.free_coherent	= dma_direct_free_coherent,
-	.map_single	= dma_direct_map_single,
-	.unmap_single	= dma_direct_unmap_single,
 	.map_sg		= dma_direct_map_sg,
 	.unmap_sg	= dma_direct_unmap_sg,
 	.dma_supported	= dma_direct_dma_supported,
+	.map_page	= dma_direct_map_page,
+	.unmap_page	= dma_direct_unmap_page,
 };
 EXPORT_SYMBOL(dma_direct_ops);
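The bus address dma_direct_map_page() produces is just the physical address
plus the per-device offset. A quick worked example under made-up values (not
from the patch):

	/* hypothetical values, illustrating dma_direct_map_page()'s math */
	unsigned long dma_data = 0x80000000UL;	/* archdata.dma_data */
	unsigned long phys     = 0x01000000UL;	/* page_to_phys(page) */
	unsigned long offset   = 0x100UL;
	dma_addr_t handle = phys + offset + dma_data;	/* 0x81000100 */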
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index ea0c61e09b76..52ccfed416ad 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -56,6 +56,34 @@ resource_size_t isa_mem_base;
 /* Default PCI flags is 0 */
 unsigned int ppc_pci_flags;
 
+static struct dma_mapping_ops *pci_dma_ops;
+
+void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
+{
+	pci_dma_ops = dma_ops;
+}
+
+struct dma_mapping_ops *get_pci_dma_ops(void)
+{
+	return pci_dma_ops;
+}
+EXPORT_SYMBOL(get_pci_dma_ops);
+
+int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
+{
+	return dma_set_mask(&dev->dev, mask);
+}
+
+int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
+{
+	int rc;
+
+	rc = dma_set_mask(&dev->dev, mask);
+	dev->dev.coherent_dma_mask = dev->dma_mask;
+
+	return rc;
+}
+
 struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
 {
 	struct pci_controller *phb;
@@ -180,6 +208,26 @@ char __devinit *pcibios_setup(char *str)
 	return str;
 }
 
+void __devinit pcibios_setup_new_device(struct pci_dev *dev)
+{
+	struct dev_archdata *sd = &dev->dev.archdata;
+
+	sd->of_node = pci_device_to_OF_node(dev);
+
+	DBG("PCI: device %s OF node: %s\n", pci_name(dev),
+	    sd->of_node ? sd->of_node->full_name : "<none>");
+
+	sd->dma_ops = pci_dma_ops;
+#ifdef CONFIG_PPC32
+	sd->dma_data = (void *)PCI_DRAM_OFFSET;
+#endif
+	set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
+
+	if (ppc_md.pci_dma_dev_setup)
+		ppc_md.pci_dma_dev_setup(dev);
+}
+EXPORT_SYMBOL(pcibios_setup_new_device);
+
 /*
  * Reads the interrupt pin to determine if interrupt is use by card.
  * If the interrupt is used, then gets the interrupt line from the
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 88db4ffaf11c..174b77ee18ff 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -424,6 +424,7 @@ void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
 	unsigned long io_offset;
 	struct resource *res;
 	int i;
+	struct pci_dev *dev;
 
 	/* Hookup PHB resources */
 	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
@@ -457,6 +458,12 @@ void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
 			bus->resource[i+1] = res;
 		}
 	}
+
+	if (ppc_md.pci_dma_bus_setup)
+		ppc_md.pci_dma_bus_setup(bus);
+
+	list_for_each_entry(dev, &bus->devices, bus_list)
+		pcibios_setup_new_device(dev);
 }
 
 /* the next one is stolen from the alpha port... */
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 1f75bf03446f..8247cff1cb3e 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -52,35 +52,6 @@ EXPORT_SYMBOL(pci_io_base);
 
 LIST_HEAD(hose_list);
 
-static struct dma_mapping_ops *pci_dma_ops;
-
-void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
-{
-	pci_dma_ops = dma_ops;
-}
-
-struct dma_mapping_ops *get_pci_dma_ops(void)
-{
-	return pci_dma_ops;
-}
-EXPORT_SYMBOL(get_pci_dma_ops);
-
-
-int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{
-	return dma_set_mask(&dev->dev, mask);
-}
-
-int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{
-	int rc;
-
-	rc = dma_set_mask(&dev->dev, mask);
-	dev->dev.coherent_dma_mask = dev->dma_mask;
-
-	return rc;
-}
-
 static void fixup_broken_pcnet32(struct pci_dev* dev)
 {
 	if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
@@ -548,23 +519,6 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
 }
 EXPORT_SYMBOL_GPL(pcibios_map_io_space);
 
-void __devinit pcibios_setup_new_device(struct pci_dev *dev)
-{
-	struct dev_archdata *sd = &dev->dev.archdata;
-
-	sd->of_node = pci_device_to_OF_node(dev);
-
-	DBG("PCI: device %s OF node: %s\n", pci_name(dev),
-	    sd->of_node ? sd->of_node->full_name : "<none>");
-
-	sd->dma_ops = pci_dma_ops;
-	set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
-
-	if (ppc_md.pci_dma_dev_setup)
-		ppc_md.pci_dma_dev_setup(dev);
-}
-EXPORT_SYMBOL(pcibios_setup_new_device);
-
 void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
 {
 	struct pci_dev *dev;
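With the pci_dma_ops accessors and pcibios_setup_new_device() now shared in
pci-common.c, a 32-bit platform only has to pick its ops once; every device
enumerated afterwards inherits them via pcibios_do_bus_setup(). A hedged
sketch of such platform code (the function name is hypothetical;
set_pci_dma_ops() and dma_direct_ops are from this patch):

	static void __init example_board_setup_arch(void)
	{
		/* every PCI device then gets sd->dma_ops = &dma_direct_ops
		 * in pcibios_setup_new_device() */
		set_pci_dma_ops(&dma_direct_ops);
	}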