aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
author	FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2009-01-05 09:36:13 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-06 08:06:50 -0500
commit	b7ea6e951833a3add60fd47f2de6870b5d0589b3 (patch)
tree	98f3b81d5f9a452e1caed649e6fb4362a4759f26
parent	4d9b977ca674dd40cfc1409a75cb73fca2cee423 (diff)
convert the DMA API to use dma_ops
This writes asm/dma-mapping.h to convert the DMA API to use dma_ops. Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> Acked-by: Tony Luck <tony.luck@intel.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	arch/ia64/include/asm/dma-mapping.h	113
1 file changed, 77 insertions(+), 36 deletions(-)
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index eeb2aa36949a..5298f4064e3c 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -65,52 +65,92 @@ extern struct dma_mapping_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);
 
-#define dma_alloc_coherent(dev, size, handle, gfp)	\
-	platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
-
-/* coherent mem. is cheap */
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		      gfp_t flag)
-{
-	return dma_alloc_coherent(dev, size, dma_handle, flag);
-}
-#define dma_free_coherent	platform_dma_free_coherent
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
-		     dma_addr_t dma_handle)
-{
-	dma_free_coherent(dev, size, cpu_addr, dma_handle);
-}
-#define dma_map_single_attrs	platform_dma_map_single_attrs
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-					size_t size, int dir)
-{
-	return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
-}
-#define dma_map_sg_attrs	platform_dma_map_sg_attrs
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
-			     int nents, int dir)
-{
-	return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
-}
-#define dma_unmap_single_attrs	platform_dma_unmap_single_attrs
-static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
-				    size_t size, int dir)
-{
-	return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
-}
-#define dma_unmap_sg_attrs	platform_dma_unmap_sg_attrs
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-			       int nents, int dir)
-{
-	return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
-}
-#define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
-#define dma_sync_sg_for_cpu	platform_dma_sync_sg_for_cpu
-#define dma_sync_single_for_device	platform_dma_sync_single_for_device
-#define dma_sync_sg_for_device	platform_dma_sync_sg_for_device
-#define dma_mapping_error	platform_dma_mapping_error
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *daddr, gfp_t gfp)
+{
+	return dma_ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *caddr, dma_addr_t daddr)
+{
+	dma_ops->free_coherent(dev, size, caddr, daddr);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev,
+					      void *caddr, size_t size,
+					      enum dma_data_direction dir,
+					      struct dma_attrs *attrs)
+{
+	return dma_ops->map_single_attrs(dev, caddr, size, dir, attrs);
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
+					  size_t size,
+					  enum dma_data_direction dir,
+					  struct dma_attrs *attrs)
+{
+	dma_ops->unmap_single_attrs(dev, daddr, size, dir, attrs);
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+				   int nents, enum dma_data_direction dir,
+				   struct dma_attrs *attrs)
+{
+	return dma_ops->map_sg_attrs(dev, sgl, nents, dir, attrs);
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev,
+				      struct scatterlist *sgl, int nents,
+				      enum dma_data_direction dir,
+				      struct dma_attrs *attrs)
+{
+	dma_ops->unmap_sg_attrs(dev, sgl, nents, dir, attrs);
+}
+
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
+					   size_t size,
+					   enum dma_data_direction dir)
+{
+	dma_ops->sync_single_for_cpu(dev, daddr, size, dir);
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+				       struct scatterlist *sgl,
+				       int nents, enum dma_data_direction dir)
+{
+	dma_ops->sync_sg_for_cpu(dev, sgl, nents, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+					      dma_addr_t daddr,
+					      size_t size,
+					      enum dma_data_direction dir)
+{
+	dma_ops->sync_single_for_device(dev, daddr, size, dir);
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+					  struct scatterlist *sgl,
+					  int nents,
+					  enum dma_data_direction dir)
+{
+	dma_ops->sync_sg_for_device(dev, sgl, nents, dir);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
+{
+	return dma_ops->mapping_error(dev, daddr);
+}
 
 #define dma_map_page(dev, pg, off, size, dir)				\
 	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
@@ -127,7 +167,10 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
 	dma_sync_single_for_device(dev, dma_handle, size, dir)
 
-#define dma_supported		platform_dma_supported
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	return dma_ops->dma_supported_op(dev, mask);
+}
 
 static inline int
 dma_set_mask (struct device *dev, u64 mask)
@@ -158,6 +201,4 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 	return dma_ops;
 }
 
-
-
 #endif /* _ASM_IA64_DMA_MAPPING_H */