author	Dmitry Torokhov <dtor@insightbb.com>	2007-02-10 01:26:32 -0500
committer	Dmitry Torokhov <dtor@insightbb.com>	2007-02-10 01:26:32 -0500
commit	b22364c8eec89e6b0c081a237f3b6348df87796f (patch)
tree	233a923281fb640106465d076997ff511efb6edf	/include/asm-avr32/dma-mapping.h
parent	2c8dc071517ec2843869024dc82be2e246f41064 (diff)
parent	66efc5a7e3061c3597ac43a8bb1026488d57e66b (diff)
Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'include/asm-avr32/dma-mapping.h')
-rw-r--r--	include/asm-avr32/dma-mapping.h	20
1 files changed, 14 insertions, 6 deletions
diff --git a/include/asm-avr32/dma-mapping.h b/include/asm-avr32/dma-mapping.h
index 0580b5d62bba..115813e48fe0 100644
--- a/include/asm-avr32/dma-mapping.h
+++ b/include/asm-avr32/dma-mapping.h
@@ -32,6 +32,14 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 	return 0;
 }
 
+/*
+ * dma_map_single can't fail as it is implemented now.
+ */
+static inline int dma_mapping_error(dma_addr_t addr)
+{
+	return 0;
+}
+
 /**
  * dma_alloc_coherent - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -109,7 +117,7 @@ static inline dma_addr_t
 dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 	       enum dma_data_direction direction)
 {
-	dma_cache_sync(cpu_addr, size, direction);
+	dma_cache_sync(dev, cpu_addr, size, direction);
 	return virt_to_bus(cpu_addr);
 }
 
@@ -211,7 +219,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 		sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset;
 		virt = page_address(sg[i].page) + sg[i].offset;
-		dma_cache_sync(virt, sg[i].length, direction);
+		dma_cache_sync(dev, virt, sg[i].length, direction);
 	}
 
 	return nents;
@@ -256,14 +264,14 @@ static inline void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 			size_t size, enum dma_data_direction direction)
 {
-	dma_cache_sync(bus_to_virt(dma_handle), size, direction);
+	dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
 }
 
 static inline void
 dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
 			   size_t size, enum dma_data_direction direction)
 {
-	dma_cache_sync(bus_to_virt(dma_handle), size, direction);
+	dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
 }
 
 /**
@@ -286,7 +294,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for (i = 0; i < nents; i++) {
-		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
 			       sg[i].length, direction);
 	}
 }
@@ -298,7 +306,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for (i = 0; i < nents; i++) {
-		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
 			       sg[i].length, direction);
 	}
 }
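
The interfaces touched above can be illustrated with a small driver fragment. This is not part of the commit; the function name example_start_tx() and its arguments are invented for illustration. It maps a buffer with dma_map_single(), which now forwards dev to dma_cache_sync(), checks the result with the newly added dma_mapping_error(), and unmaps when done.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical driver fragment, not part of the patch above. */
static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* dma_map_single() now passes dev through to dma_cache_sync(). */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* The new dma_mapping_error() always reports success on AVR32,
	 * but portable drivers are still expected to check. */
	if (dma_mapping_error(handle))
		return -ENOMEM;

	/* ... hand 'handle' to the device and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}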
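
For the dma_sync_single_for_cpu()/dma_sync_single_for_device() pair, which likewise now pass dev to dma_cache_sync(), a sketch of the usual streaming buffer-reuse pattern follows; again the function name and parameters are hypothetical.

/* Hypothetical sketch of recycling a streaming RX mapping. */
static void example_recycle_rx(struct device *dev, dma_addr_t handle,
			       void *buf, size_t len)
{
	/* Give ownership of the buffer back to the CPU before reading it. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... inspect the received data in buf ... */

	/* Return ownership to the device for the next transfer. */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}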