diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-06-15 20:35:01 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-06-15 20:35:01 -0400 |
| commit | 56b880e2e38da6c76c454052a93c0a92aa3586f7 (patch) | |
| tree | 3c37784b36901cafed61f5aeee7d3d7c40c3b424 | |
| parent | 1043e3becfbae0bee618f9b4f9598b145c4774e1 (diff) | |
| parent | c080e26edc3a2a3cdfa4c430c663ee1c3bbd8fae (diff) | |
Merge branch 'fixes-for-linus' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping
Pull DMA-mapping fixes from Marek Szyprowski:
"A set of minor fixes for dma-mapping code (ARM and x86) required for
Contiguous Memory Allocator (CMA) patches merged in v3.5-rc1."
* 'fixes-for-linus' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
x86: dma-mapping: fix broken allocation when dma_mask has been provided
ARM: dma-mapping: fix debug messages in dmabounce code
ARM: mm: fix type of the arm_dma_limit global variable
ARM: dma-mapping: Add missing static storage class specifier
| -rw-r--r-- | arch/arm/common/dmabounce.c | 16 | ||||
| -rw-r--r-- | arch/arm/mm/dma-mapping.c | 4 | ||||
| -rw-r--r-- | arch/arm/mm/init.c | 2 | ||||
| -rw-r--r-- | arch/arm/mm/mm.h | 2 | ||||
| -rw-r--r-- | arch/x86/kernel/pci-dma.c | 3 |
5 files changed, 14 insertions(+), 13 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 9d7eb530f95f..aa07f5938f05 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c | |||
| @@ -366,8 +366,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr, | |||
| 366 | struct safe_buffer *buf; | 366 | struct safe_buffer *buf; |
| 367 | unsigned long off; | 367 | unsigned long off; |
| 368 | 368 | ||
| 369 | dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n", | 369 | dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n", |
| 370 | __func__, addr, off, sz, dir); | 370 | __func__, addr, sz, dir); |
| 371 | 371 | ||
| 372 | buf = find_safe_buffer_dev(dev, addr, __func__); | 372 | buf = find_safe_buffer_dev(dev, addr, __func__); |
| 373 | if (!buf) | 373 | if (!buf) |
| @@ -377,8 +377,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr, | |||
| 377 | 377 | ||
| 378 | BUG_ON(buf->direction != dir); | 378 | BUG_ON(buf->direction != dir); |
| 379 | 379 | ||
| 380 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", | 380 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n", |
| 381 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), | 381 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off, |
| 382 | buf->safe, buf->safe_dma_addr); | 382 | buf->safe, buf->safe_dma_addr); |
| 383 | 383 | ||
| 384 | DO_STATS(dev->archdata.dmabounce->bounce_count++); | 384 | DO_STATS(dev->archdata.dmabounce->bounce_count++); |
| @@ -406,8 +406,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr, | |||
| 406 | struct safe_buffer *buf; | 406 | struct safe_buffer *buf; |
| 407 | unsigned long off; | 407 | unsigned long off; |
| 408 | 408 | ||
| 409 | dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n", | 409 | dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n", |
| 410 | __func__, addr, off, sz, dir); | 410 | __func__, addr, sz, dir); |
| 411 | 411 | ||
| 412 | buf = find_safe_buffer_dev(dev, addr, __func__); | 412 | buf = find_safe_buffer_dev(dev, addr, __func__); |
| 413 | if (!buf) | 413 | if (!buf) |
| @@ -417,8 +417,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr, | |||
| 417 | 417 | ||
| 418 | BUG_ON(buf->direction != dir); | 418 | BUG_ON(buf->direction != dir); |
| 419 | 419 | ||
| 420 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", | 420 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n", |
| 421 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), | 421 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off, |
| 422 | buf->safe, buf->safe_dma_addr); | 422 | buf->safe, buf->safe_dma_addr); |
| 423 | 423 | ||
| 424 | DO_STATS(dev->archdata.dmabounce->bounce_count++); | 424 | DO_STATS(dev->archdata.dmabounce->bounce_count++); |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 106c4c0ebccd..d766e4256b74 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
| @@ -228,7 +228,7 @@ static pte_t **consistent_pte; | |||
| 228 | 228 | ||
| 229 | #define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M | 229 | #define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M |
| 230 | 230 | ||
| 231 | unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE; | 231 | static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE; |
| 232 | 232 | ||
| 233 | void __init init_consistent_dma_size(unsigned long size) | 233 | void __init init_consistent_dma_size(unsigned long size) |
| 234 | { | 234 | { |
| @@ -321,7 +321,7 @@ static struct arm_vmregion_head coherent_head = { | |||
| 321 | .vm_list = LIST_HEAD_INIT(coherent_head.vm_list), | 321 | .vm_list = LIST_HEAD_INIT(coherent_head.vm_list), |
| 322 | }; | 322 | }; |
| 323 | 323 | ||
| 324 | size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8; | 324 | static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8; |
| 325 | 325 | ||
| 326 | static int __init early_coherent_pool(char *p) | 326 | static int __init early_coherent_pool(char *p) |
| 327 | { | 327 | { |
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index c21d06c7dd7e..f54d59219764 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
| @@ -212,7 +212,7 @@ EXPORT_SYMBOL(arm_dma_zone_size); | |||
| 212 | * allocations. This must be the smallest DMA mask in the system, | 212 | * allocations. This must be the smallest DMA mask in the system, |
| 213 | * so a successful GFP_DMA allocation will always satisfy this. | 213 | * so a successful GFP_DMA allocation will always satisfy this. |
| 214 | */ | 214 | */ |
| 215 | u32 arm_dma_limit; | 215 | phys_addr_t arm_dma_limit; |
| 216 | 216 | ||
| 217 | static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, | 217 | static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, |
| 218 | unsigned long dma_size) | 218 | unsigned long dma_size) |
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 93dc0c17cdcb..c471436c7952 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h | |||
| @@ -62,7 +62,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page | |||
| 62 | #endif | 62 | #endif |
| 63 | 63 | ||
| 64 | #ifdef CONFIG_ZONE_DMA | 64 | #ifdef CONFIG_ZONE_DMA |
| 65 | extern u32 arm_dma_limit; | 65 | extern phys_addr_t arm_dma_limit; |
| 66 | #else | 66 | #else |
| 67 | #define arm_dma_limit ((u32)~0) | 67 | #define arm_dma_limit ((u32)~0) |
| 68 | #endif | 68 | #endif |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 62c9457ccd2f..c0f420f76cd3 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
| @@ -100,7 +100,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, | |||
| 100 | struct dma_attrs *attrs) | 100 | struct dma_attrs *attrs) |
| 101 | { | 101 | { |
| 102 | unsigned long dma_mask; | 102 | unsigned long dma_mask; |
| 103 | struct page *page = NULL; | 103 | struct page *page; |
| 104 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 104 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
| 105 | dma_addr_t addr; | 105 | dma_addr_t addr; |
| 106 | 106 | ||
| @@ -108,6 +108,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, | |||
| 108 | 108 | ||
| 109 | flag |= __GFP_ZERO; | 109 | flag |= __GFP_ZERO; |
| 110 | again: | 110 | again: |
| 111 | page = NULL; | ||
| 111 | if (!(flag & GFP_ATOMIC)) | 112 | if (!(flag & GFP_ATOMIC)) |
| 112 | page = dma_alloc_from_contiguous(dev, count, get_order(size)); | 113 | page = dma_alloc_from_contiguous(dev, count, get_order(size)); |
| 113 | if (!page) | 114 | if (!page) |
