aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorFUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>2009-01-05 09:36:16 -0500
committerIngo Molnar <mingo@elte.hu>2009-01-06 08:06:51 -0500
commitc190ab0b2a5fb5cc97576c5f04f4419b6cf8dc8e (patch)
tree8bc75bb8a30211dbbcc4a3a30a1be2b831392a96 /arch
parentcdc28d59a31e3fd711982bd07600f3e5b449b9f7 (diff)
add dma_get_ops to struct ia64_machine_vector
This adds a dma_get_ops hook to struct ia64_machine_vector. We use dma_get_ops() in arch/ia64/kernel/dma-mapping.c, which simply returns the global dma_ops. This is for removing hwsw_dma_ops. Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> Acked-by: Tony Luck <tony.luck@intel.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--arch/ia64/include/asm/dma-mapping.h41
-rw-r--r--arch/ia64/include/asm/machvec.h8
-rw-r--r--arch/ia64/kernel/dma-mapping.c6
-rw-r--r--arch/ia64/kernel/pci-dma.c2
4 files changed, 39 insertions(+), 18 deletions(-)
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 5298f4064e3c..bac3159379f7 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -68,13 +68,15 @@ extern void set_iommu_machvec(void);
68static inline void *dma_alloc_coherent(struct device *dev, size_t size, 68static inline void *dma_alloc_coherent(struct device *dev, size_t size,
69 dma_addr_t *daddr, gfp_t gfp) 69 dma_addr_t *daddr, gfp_t gfp)
70{ 70{
71 return dma_ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA); 71 struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
72 return ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA);
72} 73}
73 74
74static inline void dma_free_coherent(struct device *dev, size_t size, 75static inline void dma_free_coherent(struct device *dev, size_t size,
75 void *caddr, dma_addr_t daddr) 76 void *caddr, dma_addr_t daddr)
76{ 77{
77 dma_ops->free_coherent(dev, size, caddr, daddr); 78 struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
79 ops->free_coherent(dev, size, caddr, daddr);
78} 80}
79 81
80#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 82#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
@@ -85,7 +87,8 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev,
85 enum dma_data_direction dir, 87 enum dma_data_direction dir,
86 struct dma_attrs *attrs) 88 struct dma_attrs *attrs)
87{ 89{
88 return dma_ops->map_single_attrs(dev, caddr, size, dir, attrs); 90 struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
91 return ops->map_single_attrs(dev, caddr, size, dir, attrs);
89} 92}
90 93
91static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr, 94static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
@@ -93,7 +96,8 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
93 enum dma_data_direction dir, 96 enum dma_data_direction dir,
94 struct dma_attrs *attrs) 97 struct dma_attrs *attrs)
95{ 98{
96 dma_ops->unmap_single_attrs(dev, daddr, size, dir, attrs); 99 struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
100 ops->unmap_single_attrs(dev, daddr, size, dir, attrs);
97} 101}
98 102
99#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) 103#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
@@ -103,7 +107,8 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
103 int nents, enum dma_data_direction dir, 107 int nents, enum dma_data_direction dir,
104 struct dma_attrs *attrs) 108 struct dma_attrs *attrs)
105{ 109{
106 return dma_ops->map_sg_attrs(dev, sgl, nents, dir, attrs); 110 struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
111 return ops->map_sg_attrs(dev, sgl, nents, dir, attrs);
107} 112}
108 113
109static inline void dma_unmap_sg_attrs(struct device *dev, 114static inline void dma_unmap_sg_attrs(struct device *dev,
@@ -111,7 +116,8 @@ static inline void dma_unmap_sg_attrs(struct device *dev,
111 enum dma_data_direction dir, 116 enum dma_data_direction dir,
112 struct dma_attrs *attrs) 117 struct dma_attrs *attrs)
113{ 118{
114 dma_ops->unmap_sg_attrs(dev, sgl, nents, dir, attrs); 119 struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
120 ops->unmap_sg_attrs(dev, sgl, nents, dir, attrs);
115} 121}
116 122
117#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) 123#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
@@ -121,14 +127,16 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
121 size_t size, 127 size_t size,
122 enum dma_data_direction dir) 128 enum dma_data_direction dir)
123{ 129{
124 dma_ops->sync_single_for_cpu(dev, daddr, size, dir); 130 struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
131 ops->sync_single_for_cpu(dev, daddr, size, dir);
125} 132}
126 133
127static inline void dma_sync_sg_for_cpu(struct device *dev, 134static inline void dma_sync_sg_for_cpu(struct device *dev,
128 struct scatterlist *sgl, 135 struct scatterlist *sgl,
129 int nents, enum dma_data_direction dir) 136 int nents, enum dma_data_direction dir)
130{ 137{
131 dma_ops->sync_sg_for_cpu(dev, sgl, nents, dir); 138 struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
139 ops->sync_sg_for_cpu(dev, sgl, nents, dir);
132} 140}
133 141
134static inline void dma_sync_single_for_device(struct device *dev, 142static inline void dma_sync_single_for_device(struct device *dev,
@@ -136,7 +144,8 @@ static inline void dma_sync_single_for_device(struct device *dev,
136 size_t size, 144 size_t size,
137 enum dma_data_direction dir) 145 enum dma_data_direction dir)
138{ 146{
139 dma_ops->sync_single_for_device(dev, daddr, size, dir); 147 struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
148 ops->sync_single_for_device(dev, daddr, size, dir);
140} 149}
141 150
142static inline void dma_sync_sg_for_device(struct device *dev, 151static inline void dma_sync_sg_for_device(struct device *dev,
@@ -144,12 +153,14 @@ static inline void dma_sync_sg_for_device(struct device *dev,
144 int nents, 153 int nents,
145 enum dma_data_direction dir) 154 enum dma_data_direction dir)
146{ 155{
147 dma_ops->sync_sg_for_device(dev, sgl, nents, dir); 156 struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
157 ops->sync_sg_for_device(dev, sgl, nents, dir);
148} 158}
149 159
150static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr) 160static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
151{ 161{
152 return dma_ops->mapping_error(dev, daddr); 162 struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
163 return ops->mapping_error(dev, daddr);
153} 164}
154 165
155#define dma_map_page(dev, pg, off, size, dir) \ 166#define dma_map_page(dev, pg, off, size, dir) \
@@ -169,7 +180,8 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
169 180
170static inline int dma_supported(struct device *dev, u64 mask) 181static inline int dma_supported(struct device *dev, u64 mask)
171{ 182{
172 return dma_ops->dma_supported_op(dev, mask); 183 struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
184 return ops->dma_supported_op(dev, mask);
173} 185}
174 186
175static inline int 187static inline int
@@ -196,9 +208,4 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size,
196 208
197#define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */ 209#define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */
198 210
199static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
200{
201 return dma_ops;
202}
203
204#endif /* _ASM_IA64_DMA_MAPPING_H */ 211#endif /* _ASM_IA64_DMA_MAPPING_H */
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 6be3010d746a..95e1708fa4e3 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -45,6 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
45 45
46/* DMA-mapping interface: */ 46/* DMA-mapping interface: */
47typedef void ia64_mv_dma_init (void); 47typedef void ia64_mv_dma_init (void);
48typedef struct dma_mapping_ops *ia64_mv_dma_get_ops(struct device *);
48 49
49/* 50/*
50 * WARNING: The legacy I/O space is _architected_. Platforms are 51 * WARNING: The legacy I/O space is _architected_. Platforms are
@@ -130,6 +131,7 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
130# define platform_global_tlb_purge ia64_mv.global_tlb_purge 131# define platform_global_tlb_purge ia64_mv.global_tlb_purge
131# define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish 132# define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish
132# define platform_dma_init ia64_mv.dma_init 133# define platform_dma_init ia64_mv.dma_init
134# define platform_dma_get_ops ia64_mv.dma_get_ops
133# define platform_irq_to_vector ia64_mv.irq_to_vector 135# define platform_irq_to_vector ia64_mv.irq_to_vector
134# define platform_local_vector_to_irq ia64_mv.local_vector_to_irq 136# define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
135# define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem 137# define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem
@@ -172,6 +174,7 @@ struct ia64_machine_vector {
172 ia64_mv_global_tlb_purge_t *global_tlb_purge; 174 ia64_mv_global_tlb_purge_t *global_tlb_purge;
173 ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish; 175 ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
174 ia64_mv_dma_init *dma_init; 176 ia64_mv_dma_init *dma_init;
177 ia64_mv_dma_get_ops *dma_get_ops;
175 ia64_mv_irq_to_vector *irq_to_vector; 178 ia64_mv_irq_to_vector *irq_to_vector;
176 ia64_mv_local_vector_to_irq *local_vector_to_irq; 179 ia64_mv_local_vector_to_irq *local_vector_to_irq;
177 ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem; 180 ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
@@ -210,6 +213,7 @@ struct ia64_machine_vector {
210 platform_global_tlb_purge, \ 213 platform_global_tlb_purge, \
211 platform_tlb_migrate_finish, \ 214 platform_tlb_migrate_finish, \
212 platform_dma_init, \ 215 platform_dma_init, \
216 platform_dma_get_ops, \
213 platform_irq_to_vector, \ 217 platform_irq_to_vector, \
214 platform_local_vector_to_irq, \ 218 platform_local_vector_to_irq, \
215 platform_pci_get_legacy_mem, \ 219 platform_pci_get_legacy_mem, \
@@ -246,6 +250,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
246# endif /* CONFIG_IA64_GENERIC */ 250# endif /* CONFIG_IA64_GENERIC */
247 251
248extern void swiotlb_dma_init(void); 252extern void swiotlb_dma_init(void);
253extern struct dma_mapping_ops *dma_get_ops(struct device *);
249 254
250/* 255/*
251 * Define default versions so we can extend machvec for new platforms without having 256 * Define default versions so we can extend machvec for new platforms without having
@@ -279,6 +284,9 @@ extern void swiotlb_dma_init(void);
279#ifndef platform_dma_init 284#ifndef platform_dma_init
280# define platform_dma_init swiotlb_dma_init 285# define platform_dma_init swiotlb_dma_init
281#endif 286#endif
287#ifndef platform_dma_get_ops
288# define platform_dma_get_ops dma_get_ops
289#endif
282#ifndef platform_irq_to_vector 290#ifndef platform_irq_to_vector
283# define platform_irq_to_vector __ia64_irq_to_vector 291# define platform_irq_to_vector __ia64_irq_to_vector
284#endif 292#endif
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
index 876665ae9fff..427f69617226 100644
--- a/arch/ia64/kernel/dma-mapping.c
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -2,3 +2,9 @@
2 2
3struct dma_mapping_ops *dma_ops; 3struct dma_mapping_ops *dma_ops;
4EXPORT_SYMBOL(dma_ops); 4EXPORT_SYMBOL(dma_ops);
5
6struct dma_mapping_ops *dma_get_ops(struct device *dev)
7{
8 return dma_ops;
9}
10EXPORT_SYMBOL(dma_get_ops);
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 1c1224bd0179..640669eba5d4 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -81,7 +81,7 @@ iommu_dma_init(void)
81 81
82int iommu_dma_supported(struct device *dev, u64 mask) 82int iommu_dma_supported(struct device *dev, u64 mask)
83{ 83{
84 struct dma_mapping_ops *ops = get_dma_ops(dev); 84 struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
85 85
86 if (ops->dma_supported_op) 86 if (ops->dma_supported_op)
87 return ops->dma_supported_op(dev, mask); 87 return ops->dma_supported_op(dev, mask);