author		FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2009-01-05 09:36:17 -0500
committer	Ingo Molnar <mingo@elte.hu>			2009-01-06 08:06:52 -0500
commit		c7b3aee8af5bd0d73d5779a4ad82a1496771d3ef (patch)
tree		cbb8b00a3776a1bd99ffc08d45542333bf509454 /arch/ia64/hp
parent		c190ab0b2a5fb5cc97576c5f04f4419b6cf8dc8e (diff)
remove hwsw_dma_ops
This removes hwsw_dma_ops (and the hwsw_* functions).
hwsw_dma_get_ops() can select between swiotlb_dma_ops and
sba_dma_ops as appropriate.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
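For context, this patch replaces per-operation hwsw_* wrapper functions with a single per-device ops-selection function. Below is a minimal sketch of how a call site might dispatch after this change; hwsw_dma_get_ops() and the map_single_attrs field are taken from the diff that follows, while the example_map_single() wrapper and the header choices are illustrative assumptions, not code from this patch:

	/*
	 * Sketch only: post-patch dispatch model. hwsw_dma_get_ops() and
	 * the dma_mapping_ops fields come from the diff below; the
	 * example_map_single() wrapper is hypothetical.
	 */
	#include <linux/device.h>

	extern struct dma_mapping_ops *hwsw_dma_get_ops(struct device *dev);

	static dma_addr_t example_map_single(struct device *dev, void *addr,
					     size_t size, int dir,
					     struct dma_attrs *attrs)
	{
		/* ops is &swiotlb_dma_ops or &sba_dma_ops, chosen per device */
		struct dma_mapping_ops *ops = hwsw_dma_get_ops(dev);

		return ops->map_single_attrs(dev, addr, size, dir, attrs);
	}

The design point is that the swiotlb-vs-SBA decision is made once, in one place, instead of being repeated inside every hwsw_* wrapper.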
Diffstat (limited to 'arch/ia64/hp')
-rw-r--r--	arch/ia64/hp/common/hwsw_iommu.c	183
1 file changed, 12 insertions(+), 171 deletions(-)
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 5cf750e1fddc..e5bbeba77810 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -17,55 +17,33 @@
 #include <linux/swiotlb.h>
 #include <asm/machvec.h>
 
+extern struct dma_mapping_ops sba_dma_ops, swiotlb_dma_ops;
+
 /* swiotlb declarations & definitions: */
 extern int swiotlb_late_init_with_default_size (size_t size);
 
-/* hwiommu declarations & definitions: */
-
-extern void *sba_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
-extern void sba_free_coherent (struct device *, size_t, void *, dma_addr_t);
-extern dma_addr_t sba_map_single_attrs(struct device *, void *, size_t, int,
-				       struct dma_attrs *);
-extern void sba_unmap_single_attrs(struct device *, dma_addr_t, size_t, int,
-				   struct dma_attrs *);
-extern int sba_map_sg_attrs(struct device *, struct scatterlist *, int, int,
-			    struct dma_attrs *);
-extern void sba_unmap_sg_attrs(struct device *, struct scatterlist *, int, int,
-			       struct dma_attrs *);
-extern int sba_dma_supported (struct device *, u64);
-extern int sba_dma_mapping_error(struct device *, dma_addr_t);
-
-#define hwiommu_alloc_coherent		sba_alloc_coherent
-#define hwiommu_free_coherent		sba_free_coherent
-#define hwiommu_map_single_attrs	sba_map_single_attrs
-#define hwiommu_unmap_single_attrs	sba_unmap_single_attrs
-#define hwiommu_map_sg_attrs		sba_map_sg_attrs
-#define hwiommu_unmap_sg_attrs		sba_unmap_sg_attrs
-#define hwiommu_dma_supported		sba_dma_supported
-#define hwiommu_dma_mapping_error	sba_dma_mapping_error
-#define hwiommu_sync_single_for_cpu	machvec_dma_sync_single
-#define hwiommu_sync_sg_for_cpu		machvec_dma_sync_sg
-#define hwiommu_sync_single_for_device	machvec_dma_sync_single
-#define hwiommu_sync_sg_for_device	machvec_dma_sync_sg
-
-
 /*
  * Note: we need to make the determination of whether or not to use
  * the sw I/O TLB based purely on the device structure.  Anything else
  * would be unreliable or would be too intrusive.
  */
-static inline int
-use_swiotlb (struct device *dev)
+static inline int use_swiotlb(struct device *dev)
 {
-	return dev && dev->dma_mask && !hwiommu_dma_supported(dev, *dev->dma_mask);
+	return dev && dev->dma_mask &&
+		!sba_dma_ops.dma_supported_op(dev, *dev->dma_mask);
 }
 
-struct dma_mapping_ops hwsw_dma_ops;
+struct dma_mapping_ops *hwsw_dma_get_ops(struct device *dev)
+{
+	if (use_swiotlb(dev))
+		return &swiotlb_dma_ops;
+	return &sba_dma_ops;
+}
+EXPORT_SYMBOL(hwsw_dma_get_ops);
 
 void __init
 hwsw_init (void)
 {
-	dma_ops = &hwsw_dma_ops;
 	/* default to a smallish 2MB sw I/O TLB */
 	if (swiotlb_late_init_with_default_size (2 * (1<<20)) != 0) {
 #ifdef CONFIG_IA64_GENERIC
@@ -78,140 +56,3 @@ hwsw_init (void)
 #endif
 	}
 }
-
-void *
-hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
-	else
-		return hwiommu_alloc_coherent(dev, size, dma_handle, flags);
-}
-
-void
-hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
-{
-	if (use_swiotlb(dev))
-		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-	else
-		hwiommu_free_coherent(dev, size, vaddr, dma_handle);
-}
-
-dma_addr_t
-hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
-		      struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_map_single_attrs(dev, addr, size, dir, attrs);
-	else
-		return hwiommu_map_single_attrs(dev, addr, size, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_map_single_attrs);
-
-void
-hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-			int dir, struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs);
-	else
-		return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_unmap_single_attrs);
-
-int
-hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		  int dir, struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs);
-	else
-		return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_map_sg_attrs);
-
-void
-hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		    int dir, struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
-	else
-		return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_unmap_sg_attrs);
-
-void
-hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_single_for_cpu(dev, addr, size, dir);
-	else
-		hwiommu_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-void
-hwsw_sync_sg_for_cpu (struct device *dev, struct scatterlist *sg, int nelems, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_sg_for_cpu(dev, sg, nelems, dir);
-	else
-		hwiommu_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
-
-void
-hwsw_sync_single_for_device (struct device *dev, dma_addr_t addr, size_t size, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_single_for_device(dev, addr, size, dir);
-	else
-		hwiommu_sync_single_for_device(dev, addr, size, dir);
-}
-
-void
-hwsw_sync_sg_for_device (struct device *dev, struct scatterlist *sg, int nelems, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_sg_for_device(dev, sg, nelems, dir);
-	else
-		hwiommu_sync_sg_for_device(dev, sg, nelems, dir);
-}
-
-int
-hwsw_dma_supported (struct device *dev, u64 mask)
-{
-	if (hwiommu_dma_supported(dev, mask))
-		return 1;
-	return swiotlb_dma_supported(dev, mask);
-}
-
-int
-hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return hwiommu_dma_mapping_error(dev, dma_addr) ||
-	       swiotlb_dma_mapping_error(dev, dma_addr);
-}
-
-EXPORT_SYMBOL(hwsw_dma_mapping_error);
-EXPORT_SYMBOL(hwsw_dma_supported);
-EXPORT_SYMBOL(hwsw_alloc_coherent);
-EXPORT_SYMBOL(hwsw_free_coherent);
-EXPORT_SYMBOL(hwsw_sync_single_for_cpu);
-EXPORT_SYMBOL(hwsw_sync_single_for_device);
-EXPORT_SYMBOL(hwsw_sync_sg_for_cpu);
-EXPORT_SYMBOL(hwsw_sync_sg_for_device);
-
-struct dma_mapping_ops hwsw_dma_ops = {
-	.alloc_coherent		= hwsw_alloc_coherent,
-	.free_coherent		= hwsw_free_coherent,
-	.map_single_attrs	= hwsw_map_single_attrs,
-	.unmap_single_attrs	= hwsw_unmap_single_attrs,
-	.map_sg_attrs		= hwsw_map_sg_attrs,
-	.unmap_sg_attrs		= hwsw_unmap_sg_attrs,
-	.sync_single_for_cpu	= hwsw_sync_single_for_cpu,
-	.sync_sg_for_cpu	= hwsw_sync_sg_for_cpu,
-	.sync_single_for_device	= hwsw_sync_single_for_device,
-	.sync_sg_for_device	= hwsw_sync_sg_for_device,
-	.dma_supported_op	= hwsw_dma_supported,
-	.mapping_error		= hwsw_dma_mapping_error,
-};