author     Ingo Molnar <mingo@elte.hu>  2008-10-12 09:05:39 -0400
committer  Ingo Molnar <mingo@elte.hu>  2008-10-12 09:05:39 -0400
commit     a9b9e81c915e4a57ac3b21d1a7fa7ff184639780 (patch)
tree       98304395fbb5b9c74fca35b196cd414c1949f280 /arch/arm/include/asm/dma-mapping.h
parent     a8b71a2810386a5ac8f43d2095fe3355f0d8db37 (diff)
parent     fd048088306656824958e7783ffcee27e241b361 (diff)
Merge branch 'linus' into x86/memory-corruption-check
Diffstat (limited to 'arch/arm/include/asm/dma-mapping.h')
 -rw-r--r--  arch/arm/include/asm/dma-mapping.h | 378
 1 file changed, 143 insertions(+), 235 deletions(-)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 7b95d2058395..1cb8602dd9d5 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -104,15 +104,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
  * function so drivers using this API are highlighted with build warnings.
  */
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+		dma_addr_t *handle, gfp_t gfp)
 {
 	return NULL;
 }
 
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t handle)
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+		void *cpu_addr, dma_addr_t handle)
 {
 }
 
@@ -127,8 +126,7 @@ dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
  * return the CPU-viewed address, and sets @handle to be the
  * device-viewed address.
  */
-extern void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
+extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
 
 /**
  * dma_free_coherent - free memory allocated by dma_alloc_coherent
@@ -143,9 +141,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gf
  * References to memory and mappings associated with cpu_addr/handle
  * during and after this call executing are illegal.
  */
-extern void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t handle);
+extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
 
 /**
  * dma_mmap_coherent - map a coherent DMA allocation into user space
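Usage sketch (editorial, not part of the patch): a driver pairs the two declarations above around the lifetime of a DMA buffer. The ring size and function names below are hypothetical.

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>

#define MY_RING_BYTES	4096	/* hypothetical descriptor-ring size */

static int my_ring_alloc(struct device *dev, void **cpu, dma_addr_t *dma)
{
	/* returns the CPU-viewed address; *dma receives the device-viewed one */
	*cpu = dma_alloc_coherent(dev, MY_RING_BYTES, dma, GFP_KERNEL);
	return *cpu ? 0 : -ENOMEM;
}

static void my_ring_free(struct device *dev, void *cpu, dma_addr_t dma)
{
	/* size, cpu_addr and handle must match the allocation exactly */
	dma_free_coherent(dev, MY_RING_BYTES, cpu, dma);
}
```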
@@ -159,8 +155,8 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
  * into user space.  The coherent DMA buffer must not be freed by the
  * driver until the user space mapping has been released.
  */
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t handle, size_t size);
+int dma_mmap_coherent(struct device *, struct vm_area_struct *,
+		void *, dma_addr_t, size_t);
 
 
 /**
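Usage sketch (editorial, not part of the patch): dma_mmap_coherent() is normally called from a driver's mmap file operation; `struct my_dev` and its fields are hypothetical. Note the rule above: the coherent buffer must stay allocated until the user mapping is gone.

```c
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

struct my_dev {				/* hypothetical per-device state */
	struct device	*dev;
	void		*buf_cpu;	/* from dma_alloc_coherent() */
	dma_addr_t	buf_dma;
	size_t		buf_size;
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *md = file->private_data;

	/* map the whole coherent buffer into the calling process */
	return dma_mmap_coherent(md->dev, vma, md->buf_cpu, md->buf_dma,
				 md->buf_size);
}
```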
@@ -174,14 +170,94 @@ int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
  * return the CPU-viewed address, and sets @handle to be the
  * device-viewed address.
  */
-extern void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
+extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
+		gfp_t);
 
 #define dma_free_writecombine(dev,size,cpu_addr,handle) \
 	dma_free_coherent(dev,size,cpu_addr,handle)
 
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t handle, size_t size);
+int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
+		void *, dma_addr_t, size_t);
+
+
+#ifdef CONFIG_DMABOUNCE
+/*
+ * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
+ * and utilize bounce buffers as needed to work around limited DMA windows.
+ *
+ * On the SA-1111, a bug limits DMA to only certain regions of RAM.
+ * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
+ * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
+ *
+ * The following are helper functions used by the dmabounce subystem
+ *
+ */
+
+/**
+ * dmabounce_register_dev
+ *
+ * @dev: valid struct device pointer
+ * @small_buf_size: size of buffers to use with small buffer pool
+ * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ *
+ * This function should be called by low-level platform code to register
+ * a device as requireing DMA buffer bouncing. The function will allocate
+ * appropriate DMA pools for the device.
+ *
+ */
+extern int dmabounce_register_dev(struct device *, unsigned long,
+		unsigned long);
+
+/**
+ * dmabounce_unregister_dev
+ *
+ * @dev: valid struct device pointer
+ *
+ * This function should be called by low-level platform code when device
+ * that was previously registered with dmabounce_register_dev is removed
+ * from the system.
+ *
+ */
+extern void dmabounce_unregister_dev(struct device *);
+
+/**
+ * dma_needs_bounce
+ *
+ * @dev: valid struct device pointer
+ * @dma_handle: dma_handle of unbounced buffer
+ * @size: size of region being mapped
+ *
+ * Platforms that utilize the dmabounce mechanism must implement
+ * this function.
+ *
+ * The dmabounce routines call this function whenever a dma-mapping
+ * is requested to determine whether a given buffer needs to be bounced
+ * or not. The function must return 0 if the buffer is OK for
+ * DMA access and 1 if the buffer needs to be bounced.
+ *
+ */
+extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
+
+/*
+ * The DMA API, implemented by dmabounce.c.  See below for descriptions.
+ */
+extern dma_addr_t dma_map_single(struct device *, void *, size_t,
+		enum dma_data_direction);
+extern dma_addr_t dma_map_page(struct device *, struct page *,
+		unsigned long, size_t, enum dma_data_direction);
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+		enum dma_data_direction);
+
+/*
+ * Private functions
+ */
+int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
+		size_t, enum dma_data_direction);
+int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
+		size_t, enum dma_data_direction);
+#else
+#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir)	(1)
+#define dmabounce_sync_for_device(dev,dma,off,sz,dir)	(1)
 
 
 /**
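Editorial sketch (not part of the patch) of how platform code might use the dmabounce hooks added above; the pool sizes and the 64MB window test are illustrative only, loosely modelled on the IXP425 note in the comment.

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/memory.h>
#include <asm/sizes.h>

static int my_platform_dev_probe(struct device *dev)
{
	/* hypothetical pool sizes: small buffers for headers, large for
	 * whole packets; the second pool may be 0 to disable it */
	return dmabounce_register_dev(dev, 512, 4096);
}

static void my_platform_dev_remove(struct device *dev)
{
	dmabounce_unregister_dev(dev);
}

/*
 * Required platform hook: return 1 to bounce, 0 if DMA is safe as-is.
 * Sketch assuming a 64MB inbound window at the base of RAM.
 */
int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size) >= PHYS_OFFSET + SZ_64M;
}
```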
@@ -198,19 +274,16 @@ int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
  * can regain ownership by calling dma_unmap_single() or
  * dma_sync_single_for_cpu().
  */
-#ifndef CONFIG_DMABOUNCE
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-		enum dma_data_direction dir)
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+		size_t size, enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
+
 	if (!arch_is_coherent())
 		dma_cache_maint(cpu_addr, size, dir);
 
 	return virt_to_dma(dev, cpu_addr);
 }
-#else
-extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
-#endif
 
 /**
  * dma_map_page - map a portion of a page for streaming DMA
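Editorial usage sketch (not part of the patch) for the streaming map above: CPU stores must be complete before the call, which performs any cache maintenance and hands ownership to the device. `my_hw_start_tx()` is hypothetical.

```c
#include <linux/dma-mapping.h>

extern void my_hw_start_tx(dma_addr_t addr, size_t len);	/* hypothetical */

static void my_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* cleans the cache on non-coherent ARM, then returns the bus address */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	my_hw_start_tx(handle, len);
	/* buffer now belongs to the device until dma_unmap_single() */
}
```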
@@ -224,23 +297,25 @@ extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_d
  * or written back.
  *
  * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_page() or
- * dma_sync_single_for_cpu().
+ * can regain ownership by calling dma_unmap_page().
  */
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction dir)
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir)
 {
-	return dma_map_single(dev, page_address(page) + offset, size, dir);
+	BUG_ON(!valid_dma_direction(dir));
+
+	if (!arch_is_coherent())
+		dma_cache_maint(page_address(page) + offset, size, dir);
+
+	return page_to_dma(dev, page) + offset;
 }
 
 /**
  * dma_unmap_single - unmap a single buffer previously mapped
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
+ * @size: size of buffer (same as passed to dma_map_single)
+ * @dir: DMA transfer direction (same as passed to dma_map_single)
  *
  * Unmap a single streaming mode DMA translation.  The handle and size
  * must match what was provided in the previous dma_map_single() call.
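Editorial sketch (not part of the patch): dma_map_page() covers buffers known only as a page plus offset, such as one fragment of a larger buffer; after this patch it computes the bus address directly instead of going through dma_map_single().

```c
#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* hypothetical helper: map [offset, offset + len) of one page for TX */
static dma_addr_t my_map_frag(struct device *dev, struct page *page,
			      unsigned long offset, size_t len)
{
	/* on this kernel the result is page_to_dma(dev, page) + offset */
	return dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
}
```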
@@ -249,108 +324,34 @@ dma_map_page(struct device *dev, struct page *page,
  * After this call, reads by the CPU to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
-		enum dma_data_direction dir)
+static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
 {
 	/* nothing to do */
 }
-#else
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-#endif
+#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
  *
- * Unmap a single streaming mode DMA translation.  The handle and size
- * must match what was provided in the previous dma_map_single() call.
+ * Unmap a page streaming mode DMA translation.  The handle and size
+ * must match what was provided in the previous dma_map_page() call.
  * All other usages are undefined.
  *
  * After this call, reads by the CPU to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
-		enum dma_data_direction dir)
+static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
 {
 	dma_unmap_single(dev, handle, size, dir);
 }
 
 /**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of buffers described by scatterlist in streaming
- * mode for DMA.  This is the scatter-gather version of the
- * above dma_map_single interface.  Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length.  They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- *       DMA address/length pairs than there are SG table elements.
- *       (for example via virtual mapping capabilities)
- *       The routine returns the number of addr/length pairs actually
- *       used, at most nents.
- *
- * Device ownership issues as mentioned above for dma_map_single are
- * the same here.
- */
-#ifndef CONFIG_DMABOUNCE
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		char *virt;
-
-		sg->dma_address = page_to_dma(dev, sg_page(sg)) + sg->offset;
-		virt = sg_virt(sg);
-
-		if (!arch_is_coherent())
-			dma_cache_maint(virt, sg->length, dir);
-	}
-
-	return nents;
-}
-#else
-extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
-#endif
-
-/**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Unmap a set of streaming mode DMA translations.
- * Again, CPU read rules concerning calls here are the same as for
- * dma_unmap_single() above.
- */
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-	     enum dma_data_direction dir)
-{
-
-	/* nothing to do */
-}
-#else
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
-#endif
-
-
-/**
  * dma_sync_single_range_for_cpu
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @handle: DMA address of buffer
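Editorial usage sketch (not part of the patch): the scatter-gather contract spelled out in the comment deleted above still applies to the out-of-line versions declared in the final hunk; the `my_hw_*` helpers are hypothetical.

```c
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

extern void my_hw_add_desc(dma_addr_t addr, unsigned int len);	/* hypothetical */
extern void my_hw_run_and_wait(void);				/* hypothetical */

static void my_sg_receive(struct device *dev, struct scatterlist *sgl,
			  int nents)
{
	struct scatterlist *sg = sgl;
	int i, mapped;

	/* the implementation may coalesce entries: use the return value */
	mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);

	for (i = 0; i < mapped; i++, sg++)
		my_hw_add_desc(sg_dma_address(sg), sg_dma_len(sg));

	my_hw_run_and_wait();

	/* unmap with the original nents, not the returned count */
	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
}
```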
@@ -368,145 +369,52 @@ extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_da
  * must first the perform a dma_sync_for_device, and then the
  * device again owns the buffer.
  */
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction dir)
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t handle, unsigned long offset, size_t size,
+		enum dma_data_direction dir)
 {
-	if (!arch_is_coherent())
-		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+	BUG_ON(!valid_dma_direction(dir));
+
+	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
 }
 
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction dir)
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t handle, unsigned long offset, size_t size,
+		enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
+
+	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
+		return;
+
 	if (!arch_is_coherent())
 		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
 }
-#else
-extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
-extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
-#endif
 
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
-			enum dma_data_direction dir)
+static inline void dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
 }
 
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
-			   enum dma_data_direction dir)
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
 }
 
-
-/**
- * dma_sync_sg_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as dma_sync_single_for_* but for a scatter-gather list,
- * same rules and usage.
- */
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
-		    enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		char *virt = sg_virt(sg);
-		if (!arch_is_coherent())
-			dma_cache_maint(virt, sg->length, dir);
-	}
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
-		       enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		char *virt = sg_virt(sg);
-		if (!arch_is_coherent())
-			dma_cache_maint(virt, sg->length, dir);
-	}
-}
-#else
-extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
-#endif
-
-#ifdef CONFIG_DMABOUNCE
 /*
- * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
- * and utilize bounce buffers as needed to work around limited DMA windows.
- *
- * On the SA-1111, a bug limits DMA to only certain regions of RAM.
- * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
- * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
- *
- * The following are helper functions used by the dmabounce subystem
- *
- */
-
-/**
- * dmabounce_register_dev
- *
- * @dev: valid struct device pointer
- * @small_buf_size: size of buffers to use with small buffer pool
- * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
- *
- * This function should be called by low-level platform code to register
- * a device as requireing DMA buffer bouncing. The function will allocate
- * appropriate DMA pools for the device.
- *
- */
-extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
-
-/**
- * dmabounce_unregister_dev
- *
- * @dev: valid struct device pointer
- *
- * This function should be called by low-level platform code when device
- * that was previously registered with dmabounce_register_dev is removed
- * from the system.
- *
+ * The scatter list versions of the above methods.
  */
-extern void dmabounce_unregister_dev(struct device *);
+extern int dma_map_sg(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
+extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
+extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
+extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
 
-/**
- * dma_needs_bounce
- *
- * @dev: valid struct device pointer
- * @dma_handle: dma_handle of unbounced buffer
- * @size: size of region being mapped
- *
- * Platforms that utilize the dmabounce mechanism must implement
- * this function.
- *
- * The dmabounce routines call this function whenever a dma-mapping
- * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the buffer is OK for
- * DMA access and 1 if the buffer needs to be bounced.
- *
- */
-extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-#endif /* CONFIG_DMABOUNCE */
 
 #endif /* __KERNEL__ */
 #endif
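Closing editorial sketch (not part of the patch): the sync calls reworked in the last hunk let a driver keep one streaming mapping alive across many transfers, bouncing ownership between device and CPU; `my_hw_rx_and_wait()` and `my_done()` are hypothetical.

```c
#include <linux/dma-mapping.h>

extern void my_hw_rx_and_wait(dma_addr_t addr, size_t len);	/* hypothetical */
extern int my_done(void *buf, size_t len);			/* hypothetical */

static void my_rx_loop(struct device *dev, void *buf, size_t len)
{
	/* the initial mapping already passes ownership to the device */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	for (;;) {
		my_hw_rx_and_wait(handle, len);

		/* reclaim for the CPU: reads now see the device's data */
		dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

		if (my_done(buf, len))
			break;

		/* hand the buffer back to the device for the next fill */
		dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}
```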