author		FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2009-08-09 22:53:16 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-08-10 03:35:00 -0400
commit		ee664a9252d24ef10317d1bba8fc8f4c6495b36c (patch)
tree		5a9d50253ff11584251439b2931e95042e9631c2 /arch/sparc
parent		c2c07dbd8742a26ab3f1ee8b82237a060a0d9f61 (diff)
sparc: Use asm-generic/pci-dma-compat
This converts SPARC to use asm-generic/pci-dma-compat instead of the
homegrown mechanism. SPARC32 now has two dma_map_ops structures, one for
PCI and one for SBUS (removing arch/sparc/kernel/dma.c and the PCI and
SBUS DMA accessors). The global 'dma_ops' is set to sbus_dma_ops, and
get_dma_ops() returns pci32_dma_ops for PCI devices, so the appropriate
dma mapping operations are used for each bus type.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Tested-by: Robert Reif <reif@earthlink.net>
Acked-by: David S. Miller <davem@davemloft.net>
Cc: tony.luck@intel.com
Cc: fenghua.yu@intel.com
LKML-Reference: <1249872797-1314-8-git-send-email-fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
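[ For context: asm-generic/pci-dma-compat.h implements the legacy pci_*
  DMA calls as thin inline wrappers over the generic dma_* API, which is
  what allows the hand-written pci_* implementations below to be deleted.
  An abridged, illustrative sketch of the wrapper style -- not part of
  this diff:

	/* Each legacy pci_* call forwards to the dma_* API on &hwdev->dev;
	 * a NULL hwdev maps to a NULL struct device. */
	static inline void *
	pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
			     dma_addr_t *dma_handle)
	{
		return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
					  size, dma_handle, GFP_ATOMIC);
	}

	static inline dma_addr_t
	pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
		       int direction)
	{
		return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr,
				      size, (enum dma_data_direction)direction);
	}
]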
Diffstat (limited to 'arch/sparc')
-rw-r--r--	arch/sparc/include/asm/dma-mapping.h	|   7
-rw-r--r--	arch/sparc/include/asm/pci.h		|   3
-rw-r--r--	arch/sparc/include/asm/pci_32.h		| 105
-rw-r--r--	arch/sparc/include/asm/pci_64.h		|  88
-rw-r--r--	arch/sparc/kernel/dma.c			| 155
-rw-r--r--	arch/sparc/kernel/dma.h			|  14
-rw-r--r--	arch/sparc/kernel/iommu.c		|   4
-rw-r--r--	arch/sparc/kernel/ioport.c		| 162
-rw-r--r--	arch/sparc/kernel/pci.c			|   2
9 files changed, 96 insertions(+), 444 deletions(-)
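[ Reading guide: the behavioral pivot is the get_dma_ops() hunk that
  opens the diff. After this patch, the generic DMA API on sparc32
  dispatches per bus: PCI devices get pci32_dma_ops, everything else
  gets the global dma_ops (sbus_dma_ops). A rough sketch of the
  resulting call path, assuming the usual ops-table shape of the DMA
  API wrappers (simplified; the real wrappers live in the dma-mapping
  headers and are not written exactly like this):

	static inline dma_addr_t sketch_dma_map_page(struct device *dev,
				struct page *page, unsigned long off,
				size_t sz, enum dma_data_direction dir)
	{
		/* pci32_dma_ops for PCI devices, sbus_dma_ops otherwise */
		struct dma_map_ops *ops = get_dma_ops(dev);

		return ops->map_page(dev, page, off, sz, dir, NULL);
	}
]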
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 34c92264208..2677818dc78 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -14,10 +14,15 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #define dma_is_consistent(d, h) (1)
 
-extern struct dma_map_ops *dma_ops;
+extern struct dma_map_ops *dma_ops, pci32_dma_ops;
+extern struct bus_type pci_bus_type;
 
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
+#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
+	if (dev->bus == &pci_bus_type)
+		return &pci32_dma_ops;
+#endif
 	return dma_ops;
 }
 
diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h
index 6e14fd17933..d9c031f9910 100644
--- a/arch/sparc/include/asm/pci.h
+++ b/arch/sparc/include/asm/pci.h
@@ -5,4 +5,7 @@
 #else
 #include <asm/pci_32.h>
 #endif
+
+#include <asm-generic/pci-dma-compat.h>
+
 #endif
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h
index b41c4c19815..ac0e8369fd9 100644
--- a/arch/sparc/include/asm/pci_32.h
+++ b/arch/sparc/include/asm/pci_32.h
@@ -31,42 +31,8 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
  */
 #define PCI_DMA_BUS_IS_PHYS	(0)
 
-#include <asm/scatterlist.h>
-
 struct pci_dev;
 
-/* Allocate and map kernel buffer using consistent mode DMA for a device.
- * hwdev should be valid struct pci_dev pointer for PCI devices.
- */
-extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
-
-/* Free and unmap a consistent DMA buffer.
- * cpu_addr is what was returned from pci_alloc_consistent,
- * size must be the same as what as passed into pci_alloc_consistent,
- * and likewise dma_addr must be the same as what *dma_addrp was set to.
- *
- * References to the memory and mappings assosciated with cpu_addr/dma_addr
- * past this call are illegal.
- */
-extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
-
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
- */
-extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
-
-/* Unmap a single streaming mode DMA translation.  The dma_addr and size
- * must match what was provided for in a previous pci_map_single call.  All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
-
 /* pci_unmap_{single,page} is not a nop, thus... */
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
 	dma_addr_t ADDR_NAME;
@@ -81,69 +47,6 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t
 #define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
 	(((PTR)->LEN_NAME) = (VAL))
 
-/*
- * Same as above, only with pages instead of mapped addresses.
- */
-extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
-			unsigned long offset, size_t size, int direction);
-extern void pci_unmap_page(struct pci_dev *hwdev,
-			dma_addr_t dma_address, size_t size, int direction);
-
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA.  This is the scather-gather version of the
- * above pci_map_single interface.  Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length.  They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- *       DMA address/length pairs than there are SG table elements.
- *       (for example via virtual mapping capabilities)
- *       The routine returns the number of addr/length pairs actually
- *       used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
-
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction);
-
-/* Make physical memory consistent for a single
- * streaming mode DMA translation after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so.  At the
- * next point you give the PCI dma address back to the card, you
- * must first perform a pci_dma_sync_for_device, and then the device
- * again owns the buffer.
- */
-extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
-extern void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
-
-/* Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as pci_dma_sync_single_* but for a scatter-gather list,
- * same rules and usage.
- */
-extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
-extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
-
-/* Return whether the given PCI device DMA address mask can
- * be supported properly.  For example, if your device can
- * only drive the low 24-bits during PCI bus mastering, then
- * you would pass 0x00ffffff as the mask to this function.
- */
-static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
-{
-	return 1;
-}
-
 #ifdef CONFIG_PCI
 static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 					enum pci_dma_burst_strategy *strat,
@@ -154,14 +57,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 }
 #endif
 
-#define PCI_DMA_ERROR_CODE	(~(dma_addr_t)0x0)
-
-static inline int pci_dma_mapping_error(struct pci_dev *pdev,
-					dma_addr_t dma_addr)
-{
-	return (dma_addr == PCI_DMA_ERROR_CODE);
-}
-
 struct device_node;
 extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
 
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 7a1e3566e59..5cc9f6aa549 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -35,37 +35,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
  */
 #define PCI_DMA_BUS_IS_PHYS	(0)
 
-static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
-					 dma_addr_t *dma_handle)
-{
-	return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC);
-}
-
-static inline void pci_free_consistent(struct pci_dev *pdev, size_t size,
-				       void *vaddr, dma_addr_t dma_handle)
-{
-	return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle);
-}
-
-static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
-					size_t size, int direction)
-{
-	return dma_map_single(&pdev->dev, ptr, size,
-			      (enum dma_data_direction) direction);
-}
-
-static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
-				    size_t size, int direction)
-{
-	dma_unmap_single(&pdev->dev, dma_addr, size,
-			 (enum dma_data_direction) direction);
-}
-
-#define pci_map_page(dev, page, off, size, dir) \
-	pci_map_single(dev, (page_address(page) + (off)), size, dir)
-#define pci_unmap_page(dev,addr,sz,dir) \
-	pci_unmap_single(dev,addr,sz,dir)
-
 /* pci_unmap_{single,page} is not a nop, thus... */
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
 	dma_addr_t ADDR_NAME;
@@ -80,57 +49,6 @@ static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
 #define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
 	(((PTR)->LEN_NAME) = (VAL))
 
-static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
-			     int nents, int direction)
-{
-	return dma_map_sg(&pdev->dev, sg, nents,
-			  (enum dma_data_direction) direction);
-}
-
-static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg,
-				int nents, int direction)
-{
-	dma_unmap_sg(&pdev->dev, sg, nents,
-		     (enum dma_data_direction) direction);
-}
-
-static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev,
-					       dma_addr_t dma_handle,
-					       size_t size, int direction)
-{
-	dma_sync_single_for_cpu(&pdev->dev, dma_handle, size,
-				(enum dma_data_direction) direction);
-}
-
-static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev,
-						  dma_addr_t dma_handle,
-						  size_t size, int direction)
-{
-	/* No flushing needed to sync cpu writes to the device. */
-}
-
-static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev,
-					   struct scatterlist *sg,
-					   int nents, int direction)
-{
-	dma_sync_sg_for_cpu(&pdev->dev, sg, nents,
-			    (enum dma_data_direction) direction);
-}
-
-static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev,
-					      struct scatterlist *sg,
-					      int nelems, int direction)
-{
-	/* No flushing needed to sync cpu writes to the device. */
-}
-
-/* Return whether the given PCI device DMA address mask can
- * be supported properly.  For example, if your device can
- * only drive the low 24-bits during PCI bus mastering, then
- * you would pass 0x00ffffff as the mask to this function.
- */
-extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
-
 /* PCI IOMMU mapping bypass support. */
 
 /* PCI 64-bit addressing works for all slots on all controller
@@ -140,12 +58,6 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
 #define PCI64_REQUIRED_MASK	(~(dma64_addr_t)0)
 #define PCI64_ADDR_BASE		0xfffc000000000000UL
 
-static inline int pci_dma_mapping_error(struct pci_dev *pdev,
-					dma_addr_t dma_addr)
-{
-	return dma_mapping_error(&pdev->dev, dma_addr);
-}
-
 #ifdef CONFIG_PCI
 static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 					enum pci_dma_burst_strategy *strat,
diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c
index a5d50dac735..b2fa3127f60 100644
--- a/arch/sparc/kernel/dma.c
+++ b/arch/sparc/kernel/dma.c
@@ -13,13 +13,17 @@
 #include <linux/pci.h>
 #endif
 
-#include "dma.h"
-
+/*
+ * Return whether the given PCI device DMA address mask can be
+ * supported properly.  For example, if your device can only drive the
+ * low 24-bits during PCI bus mastering, then you would pass
+ * 0x00ffffff as the mask to this function.
+ */
 int dma_supported(struct device *dev, u64 mask)
 {
 #ifdef CONFIG_PCI
 	if (dev->bus == &pci_bus_type)
-		return pci_dma_supported(to_pci_dev(dev), mask);
+		return 1;
 #endif
 	return 0;
 }
@@ -34,148 +38,3 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
 	return -EOPNOTSUPP;
 }
 EXPORT_SYMBOL(dma_set_mask);
-
-static void *dma32_alloc_coherent(struct device *dev, size_t size,
-				  dma_addr_t *dma_handle, gfp_t flag)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
-#endif
-	return sbus_alloc_consistent(dev, size, dma_handle);
-}
-
-static void dma32_free_coherent(struct device *dev, size_t size,
-				void *cpu_addr, dma_addr_t dma_handle)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_free_consistent(to_pci_dev(dev), size,
-				    cpu_addr, dma_handle);
-		return;
-	}
-#endif
-	sbus_free_consistent(dev, size, cpu_addr, dma_handle);
-}
-
-static dma_addr_t dma32_map_page(struct device *dev, struct page *page,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction direction,
-				 struct dma_attrs *attrs)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return pci_map_page(to_pci_dev(dev), page, offset,
-				    size, (int)direction);
-#endif
-	return sbus_map_page(dev, page, offset, size, (int)direction);
-}
-
-static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address,
-			     size_t size, enum dma_data_direction direction,
-			     struct dma_attrs *attrs)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_unmap_page(to_pci_dev(dev), dma_address,
-			       size, (int)direction);
-		return;
-	}
-#endif
-	sbus_unmap_page(dev, dma_address, size, (int)direction);
-}
-
-static int dma32_map_sg(struct device *dev, struct scatterlist *sg,
-			int nents, enum dma_data_direction direction,
-			struct dma_attrs *attrs)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
-#endif
-	return sbus_map_sg(dev, sg, nents, direction);
-}
-
-void dma32_unmap_sg(struct device *dev, struct scatterlist *sg,
-		    int nents, enum dma_data_direction direction,
-		    struct dma_attrs *attrs)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_unmap_sg(to_pci_dev(dev), sg, nents, (int)direction);
-		return;
-	}
-#endif
-	sbus_unmap_sg(dev, sg, nents, (int)direction);
-}
-
-static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-				      size_t size,
-				      enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
-					    size, (int)direction);
-		return;
-	}
-#endif
-	sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction);
-}
-
-static void dma32_sync_single_for_device(struct device *dev,
-					 dma_addr_t dma_handle, size_t size,
-					 enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
-					       size, (int)direction);
-		return;
-	}
-#endif
-	sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction);
-}
-
-static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-				  int nelems, enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg,
-					nelems, (int)direction);
-		return;
-	}
-#endif
-	BUG();
-}
-
-static void dma32_sync_sg_for_device(struct device *dev,
-				     struct scatterlist *sg, int nelems,
-				     enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_dma_sync_sg_for_device(to_pci_dev(dev), sg,
-					   nelems, (int)direction);
-		return;
-	}
-#endif
-	BUG();
-}
-
-static struct dma_map_ops dma32_dma_ops = {
-	.alloc_coherent = dma32_alloc_coherent,
-	.free_coherent = dma32_free_coherent,
-	.map_page = dma32_map_page,
-	.unmap_page = dma32_unmap_page,
-	.map_sg = dma32_map_sg,
-	.unmap_sg = dma32_unmap_sg,
-	.sync_single_for_cpu = dma32_sync_single_for_cpu,
-	.sync_single_for_device = dma32_sync_single_for_device,
-	.sync_sg_for_cpu = dma32_sync_sg_for_cpu,
-	.sync_sg_for_device = dma32_sync_sg_for_device,
-};
-
-struct dma_map_ops *dma_ops = &dma32_dma_ops;
-EXPORT_SYMBOL(dma_ops);
diff --git a/arch/sparc/kernel/dma.h b/arch/sparc/kernel/dma.h
deleted file mode 100644
index 680351ee0d4..00000000000
--- a/arch/sparc/kernel/dma.h
+++ /dev/null
@@ -1,14 +0,0 @@
-void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp);
-void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba);
-dma_addr_t sbus_map_page(struct device *dev, struct page *page,
-			 unsigned long offset, size_t len, int direction);
-void sbus_unmap_page(struct device *dev, dma_addr_t ba,
-		     size_t n, int direction);
-int sbus_map_sg(struct device *dev, struct scatterlist *sg,
-		int n, int direction);
-void sbus_unmap_sg(struct device *dev, struct scatterlist *sg,
-		   int n, int direction);
-void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
-				  size_t size, int direction);
-void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba,
-				     size_t size, int direction);
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 74b289cab55..7690cc219ec 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -840,6 +840,8 @@ static struct dma_map_ops sun4u_dma_ops = {
 struct dma_map_ops *dma_ops = &sun4u_dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
+extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
+
 int dma_supported(struct device *dev, u64 device_mask)
 {
 	struct iommu *iommu = dev->archdata.iommu;
@@ -853,7 +855,7 @@ int dma_supported(struct device *dev, u64 device_mask)
 
 #ifdef CONFIG_PCI
 	if (dev->bus == &pci_bus_type)
-		return pci_dma_supported(to_pci_dev(dev), device_mask);
+		return pci64_dma_supported(to_pci_dev(dev), device_mask);
 #endif
 
 	return 0;
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 39ff1e0c518..1eb60438965 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -48,8 +48,6 @@
 #include <asm/iommu.h>
 #include <asm/io-unit.h>
 
-#include "dma.h"
-
 #define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */
 
 static struct resource *_sparc_find_resource(struct resource *r,
@@ -246,7 +244,8 @@ EXPORT_SYMBOL(sbus_set_sbus64);
  * Typically devices use them for control blocks.
  * CPU may access them without any explicit flushing.
  */
-void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
+static void *sbus_alloc_coherent(struct device *dev, size_t len,
+				 dma_addr_t *dma_addrp, gfp_t gfp)
 {
 	struct of_device *op = to_of_device(dev);
 	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
@@ -299,7 +298,8 @@ err_nopages:
 	return NULL;
 }
 
-void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
+static void sbus_free_coherent(struct device *dev, size_t n, void *p,
+			       dma_addr_t ba)
 {
 	struct resource *res;
 	struct page *pgv;
@@ -317,7 +317,7 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
 
 	n = (n + PAGE_SIZE-1) & PAGE_MASK;
 	if ((res->end-res->start)+1 != n) {
-		printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
+		printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
 		       (long)((res->end-res->start)+1), n);
 		return;
 	}
@@ -337,8 +337,10 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
  * CPU view of this memory may be inconsistent with
  * a device view and explicit flushing is necessary.
  */
-dma_addr_t sbus_map_page(struct device *dev, struct page *page,
-			 unsigned long offset, size_t len, int direction)
+static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
+				unsigned long offset, size_t len,
+				enum dma_data_direction dir,
+				struct dma_attrs *attrs)
 {
 	void *va = page_address(page) + offset;
 
@@ -353,12 +355,14 @@ dma_addr_t sbus_map_page(struct device *dev, struct page *page,
 	return mmu_get_scsi_one(dev, va, len);
 }
 
-void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n, int direction)
+static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
+			    enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_release_scsi_one(dev, ba, n);
 }
 
-int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
+static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
+		       enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_get_scsi_sgl(dev, sg, n);
 
@@ -369,19 +373,38 @@ int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction
 	return n;
 }
 
-void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
+static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
+			  enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_release_scsi_sgl(dev, sg, n);
 }
 
-void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
+static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+				 int n, enum dma_data_direction dir)
 {
+	BUG();
 }
 
-void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
+static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+				    int n, enum dma_data_direction dir)
 {
+	BUG();
 }
 
+struct dma_map_ops sbus_dma_ops = {
+	.alloc_coherent = sbus_alloc_coherent,
+	.free_coherent = sbus_free_coherent,
+	.map_page = sbus_map_page,
+	.unmap_page = sbus_unmap_page,
+	.map_sg = sbus_map_sg,
+	.unmap_sg = sbus_unmap_sg,
+	.sync_sg_for_cpu = sbus_sync_sg_for_cpu,
+	.sync_sg_for_device = sbus_sync_sg_for_device,
+};
+
+struct dma_map_ops *dma_ops = &sbus_dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
 static int __init sparc_register_ioport(void)
 {
 	register_proc_sparc_ioport();
@@ -398,7 +421,8 @@ arch_initcall(sparc_register_ioport);
 /* Allocate and map kernel buffer using consistent mode DMA for a device.
  * hwdev should be valid struct pci_dev pointer for PCI devices.
  */
-void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
+static void *pci32_alloc_coherent(struct device *dev, size_t len,
+				  dma_addr_t *pba, gfp_t gfp)
 {
 	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
 	unsigned long va;
@@ -442,7 +466,6 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
 	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
 	return (void *) res->start;
 }
-EXPORT_SYMBOL(pci_alloc_consistent);
 
 /* Free and unmap a consistent DMA buffer.
  * cpu_addr is what was returned from pci_alloc_consistent,
@@ -452,7 +475,8 @@ EXPORT_SYMBOL(pci_alloc_consistent);
  * References to the memory and mappings associated with cpu_addr/dma_addr
  * past this call are illegal.
  */
-void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
+static void pci32_free_coherent(struct device *dev, size_t n, void *p,
+				dma_addr_t ba)
 {
 	struct resource *res;
 	unsigned long pgp;
@@ -484,60 +508,18 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
 
 	free_pages(pgp, get_order(n));
 }
-EXPORT_SYMBOL(pci_free_consistent);
-
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single_* is performed.
- */
-dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
-    int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* IIep is write-through, not flushing. */
-	return virt_to_phys(ptr);
-}
-EXPORT_SYMBOL(pci_map_single);
-
-/* Unmap a single streaming mode DMA translation.  The dma_addr and size
- * must match what was provided for in a previous pci_map_single call.  All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
-    int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
-		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
-		    (size + PAGE_SIZE-1) & PAGE_MASK);
-	}
-}
-EXPORT_SYMBOL(pci_unmap_single);
 
 /*
  * Same as pci_map_single, but with pages.
  */
-dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
-	unsigned long offset, size_t size, int direction)
+static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	return page_to_phys(page) + offset;
 }
-EXPORT_SYMBOL(pci_map_page);
-
-void pci_unmap_page(struct pci_dev *hwdev,
-			dma_addr_t dma_address, size_t size, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* mmu_inval_dma_area XXX */
-}
-EXPORT_SYMBOL(pci_unmap_page);
 
 /* Map a set of buffers described by scatterlist in streaming
  * mode for DMA.  This is the scather-gather version of the
@@ -554,13 +536,13 @@ EXPORT_SYMBOL(pci_unmap_page);
  * Device ownership issues as mentioned above for pci_map_single are
  * the same here.
  */
-int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
-    int direction)
+static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
+			int nents, enum dma_data_direction dir,
+			struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	for_each_sg(sgl, sg, nents, n) {
 		BUG_ON(page_address(sg_page(sg)) == NULL);
@@ -569,20 +551,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
 	}
 	return nents;
 }
-EXPORT_SYMBOL(pci_map_sg);
 
 /* Unmap a set of streaming mode DMA translations.
  * Again, cpu read rules concerning calls here are the same as for
  * pci_unmap_single() above.
  */
-void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
-    int direction)
+static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
+			   int nents, enum dma_data_direction dir,
+			   struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
@@ -591,7 +572,6 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
 		}
 	}
 }
-EXPORT_SYMBOL(pci_unmap_sg);
 
 /* Make physical memory consistent for a single
  * streaming mode DMA translation before or after a transfer.
@@ -603,25 +583,23 @@ EXPORT_SYMBOL(pci_unmap_sg);
  * must first perform a pci_dma_sync_for_device, and then the
  * device again owns the buffer.
  */
-void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
+static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
+				      size_t size, enum dma_data_direction dir)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
 		    (size + PAGE_SIZE-1) & PAGE_MASK);
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
 
-void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
+static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
+					 size_t size, enum dma_data_direction dir)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
 		    (size + PAGE_SIZE-1) & PAGE_MASK);
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_single_for_device);
 
 /* Make physical memory consistent for a set of streaming
  * mode DMA translations after a transfer.
@@ -629,13 +607,13 @@ EXPORT_SYMBOL(pci_dma_sync_single_for_device);
  * The same as pci_dma_sync_single_* but for a scatter-gather list,
  * same rules and usage.
  */
-void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
+static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+				  int nents, enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
@@ -644,15 +622,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
 		}
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
 
-void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
+static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
+				     int nents, enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
@@ -661,7 +638,20 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
 		}
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_sg_for_device);
+
+struct dma_map_ops pci32_dma_ops = {
+	.alloc_coherent = pci32_alloc_coherent,
+	.free_coherent = pci32_free_coherent,
+	.map_page = pci32_map_page,
+	.map_sg = pci32_map_sg,
+	.unmap_sg = pci32_unmap_sg,
+	.sync_single_for_cpu = pci32_sync_single_for_cpu,
+	.sync_single_for_device = pci32_sync_single_for_device,
+	.sync_sg_for_cpu = pci32_sync_sg_for_cpu,
+	.sync_sg_for_device = pci32_sync_sg_for_device,
+};
+EXPORT_SYMBOL(pci32_dma_ops);
+
 #endif /* CONFIG_PCI */
 
 #ifdef CONFIG_PROC_FS
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 57859ad2354..c6864866280 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -1039,7 +1039,7 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
 	pci_dev_put(ali_isa_bridge);
 }
 
-int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
+int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask)
 {
 	u64 dma_addr_mask;
 