author     Russell King <rmk@dyn-67.arm.linux.org.uk>    2008-09-29 08:48:17 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2008-09-29 08:54:28 -0400
commit     0e18b5d7c6339311f1e32e7b186ae3556c5b6d33
tree       d9c3ce8112509cbf0e1e8e668711c1375feb4120
parent     3216a97bb0d5166ec5795aa3db1c3a02415ac060
[ARM] dma: add validation of DMA params
Validate the direction argument like x86 does. In addition,
validate the dma_unmap_* parameters against those passed to
dma_map_* when using the DMA bounce code.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--  arch/arm/common/dmabounce.c         | 11
-rw-r--r--  arch/arm/include/asm/dma-mapping.h  |  8
2 files changed, 15 insertions(+), 4 deletions(-)
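For context on the new check: BUG_ON(dir == DMA_NONE) rejects only one bad value, whereas the generic valid_dma_direction() helper accepts only the three real transfer directions, so it also rejects out-of-range garbage. At the time of this commit the helper in include/linux/dma-mapping.h looked roughly like this (quoted from memory, as a sketch rather than the authoritative source):

    /*
     * A streaming DMA mapping must declare one of the three real
     * transfer directions; DMA_NONE (or any out-of-range value) is
     * rejected.  Strictly stronger than BUG_ON(dir == DMA_NONE).
     */
    static inline int valid_dma_direction(int dma_direction)
    {
            return ((dma_direction == DMA_BIDIRECTIONAL) ||
                    (dma_direction == DMA_TO_DEVICE) ||
                    (dma_direction == DMA_FROM_DEVICE));
    }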
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 22aec95c986..f030f0775be 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -289,6 +289,7 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
         if (buf) {
                 BUG_ON(buf->size != size);
+                BUG_ON(buf->direction != dir);
 
                 dev_dbg(dev,
                         "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -334,7 +335,7 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
         dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
                 __func__, ptr, size, dir);
 
-        BUG_ON(dir == DMA_NONE);
+        BUG_ON(!valid_dma_direction(dir));
 
         return map_single(dev, ptr, size, dir);
 }
@@ -346,7 +347,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
         dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
                 __func__, page, offset, size, dir);
 
-        BUG_ON(dir == DMA_NONE);
+        BUG_ON(!valid_dma_direction(dir));
 
         return map_single(dev, page_address(page) + offset, size, dir);
 }
@@ -365,8 +366,6 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
         dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
                 __func__, (void *) dma_addr, size, dir);
 
-        BUG_ON(dir == DMA_NONE);
-
         unmap_single(dev, dma_addr, size, dir);
 }
 EXPORT_SYMBOL(dma_unmap_single);
@@ -383,6 +382,8 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
         if (!buf)
                 return 1;
 
+        BUG_ON(buf->direction != dir);
+
         dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                 __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
                 buf->safe, buf->safe_dma_addr);
@@ -410,6 +411,8 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
         if (!buf)
                 return 1;
 
+        BUG_ON(buf->direction != dir);
+
         dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                 __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
                 buf->safe, buf->safe_dma_addr);
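The buf->direction checks work because map_single() records the requested direction in the per-mapping bookkeeping (dmabounce's struct safe_buffer), which unmap and the sync hooks can later compare against. They catch a real class of driver bug: mapping a buffer in one direction and unmapping or syncing it in another, which previously made the bounce code do the wrong copy/cache handling silently. A hypothetical misuse that now dies loudly (dma_map_single/dma_unmap_single are the real API; dev, buf and len are illustrative names):

    dma_addr_t handle;

    /* The driver maps the buffer for CPU -> device transfers... */
    handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

    /*
     * ...but mistakenly unmaps it as a device -> CPU mapping.  On a
     * dmabounce platform, unmap_single() now hits
     * BUG_ON(buf->direction != dir) instead of quietly performing the
     * wrong copy-back handling for the bounce buffer.
     */
    dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);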
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 1532b7a6079..2544a087c21 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -277,6 +277,8 @@ int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                 size_t size, enum dma_data_direction dir)
 {
+        BUG_ON(!valid_dma_direction(dir));
+
         if (!arch_is_coherent())
                 dma_cache_maint(cpu_addr, size, dir);
 
@@ -301,6 +303,8 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                 unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+        BUG_ON(!valid_dma_direction(dir));
+
         if (!arch_is_coherent())
                 dma_cache_maint(page_address(page) + offset, size, dir);
 
@@ -370,6 +374,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
                 dma_addr_t handle, unsigned long offset, size_t size,
                 enum dma_data_direction dir)
 {
+        BUG_ON(!valid_dma_direction(dir));
+
         if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
                 return;
 
@@ -381,6 +387,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
                 dma_addr_t handle, unsigned long offset, size_t size,
                 enum dma_data_direction dir)
 {
+        BUG_ON(!valid_dma_direction(dir));
+
         if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
                 return;
 
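With the dma-mapping.h hunks, the same validation sits in the inline streaming fast paths, so dir is checked before any dmabounce lookup or cache maintenance runs. Typical sync-range usage that passes the new check (real API; ring_dma, offset and len are illustrative names):

    /* Claim a region the device has DMA'd into back for the CPU. */
    dma_sync_single_range_for_cpu(dev, ring_dma, offset, len,
                                  DMA_FROM_DEVICE);

    /* ... CPU inspects the received data ... */

    /* Hand the region back to the device for the next transfer. */
    dma_sync_single_range_for_device(dev, ring_dma, offset, len,
                                     DMA_FROM_DEVICE);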