author     Russell King <rmk@dyn-67.arm.linux.org.uk>       2008-09-29 08:48:17 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>       2008-09-29 08:54:28 -0400
commit     0e18b5d7c6339311f1e32e7b186ae3556c5b6d33 (patch)
tree       d9c3ce8112509cbf0e1e8e668711c1375feb4120 /arch/arm/include/asm/dma-mapping.h
parent     3216a97bb0d5166ec5795aa3db1c3a02415ac060 (diff)
[ARM] dma: add validation of DMA params
Validate the direction argument like x86 does. In addition,
validate the dma_unmap_* parameters against those passed to
dma_map_* when using the DMA bounce code.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
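
For context, the direction check added below relies on the generic valid_dma_direction() helper rather than anything ARM-specific. A minimal sketch of that helper, assuming the generic definition from include/linux/dma-mapping.h of the time (not part of this patch):

/* Assumed generic helper, not part of this patch: a direction is valid
 * only if it is one of the three real transfer directions, so DMA_NONE
 * (and any out-of-range value) is rejected. */
static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}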
Diffstat (limited to 'arch/arm/include/asm/dma-mapping.h')
 arch/arm/include/asm/dma-mapping.h | 8 ++++++++
 1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 1532b7a6079d..2544a087c213 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -277,6 +277,8 @@ int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 		size_t size, enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
+
 	if (!arch_is_coherent())
 		dma_cache_maint(cpu_addr, size, dir);
 
@@ -301,6 +303,8 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
+
 	if (!arch_is_coherent())
 		dma_cache_maint(page_address(page) + offset, size, dir);
 
@@ -370,6 +374,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 		dma_addr_t handle, unsigned long offset, size_t size,
 		enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
+
 	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
 		return;
 
@@ -381,6 +387,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 		dma_addr_t handle, unsigned long offset, size_t size,
 		enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
+
 	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
 		return;
 
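
With these checks in place, a caller that passes a bogus direction now hits BUG_ON() at the mapping or sync call instead of silently performing the wrong cache maintenance. A hypothetical call that would trip the new check (dev, buf and len are illustrative placeholders, not taken from this patch):

	/* Hypothetical driver snippet: any value outside enum dma_data_direction
	 * now triggers the BUG_ON() added to dma_map_single() above. */
	dma_addr_t handle = dma_map_single(dev, buf, len,
					   (enum dma_data_direction)42);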