author		Linus Torvalds <torvalds@linux-foundation.org>	2012-12-11 14:27:10 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-11 14:27:10 -0500
commit		c45564e91604ca4d03505ba4d567541c7e4f86fe (patch)
tree		f686dce956b8ceef91215aff55009555875b394c
parent		93874681aa3f538a2b9d59a6c5b7c0e882a36978 (diff)
parent		4009793e15d44469da1547a46ab129cc08ffa503 (diff)
Merge branch 'for-v3.8' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping
Pull CMA and DMA-mapping updates from Marek Szyprowski:
 "Another set of Contiguous Memory Allocator and DMA-mapping framework
  updates for v3.8.  This pull request consists of only two patches.

  The first fixes a long-standing issue with dma pools (the code
  predates current git history), which forced all allocations to use
  the GFP_ATOMIC flag, ignoring the flags passed by the caller.  The
  second patch changes the CMA code to correctly use the phys_addr_t
  type, which enables support for LPAE systems."

* 'for-v3.8' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  drivers: cma: represent physical addresses as phys_addr_t
  mm: dmapool: use provided gfp flags for all dma_alloc_coherent() calls
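In short, the dmapool change boils down to the following before/after
sketch, distilled from the mm/dmapool.c hunks below (all identifiers are
from that file):

	/* Before: when no partially-used page was found, the pool always
	 * grew with GFP_ATOMIC, silently discarding the caller's mem_flags. */
	page = pool_alloc_page(pool, GFP_ATOMIC);

	/* After: the caller's flags reach dma_alloc_coherent(), so a
	 * GFP_KERNEL request may sleep and reclaim instead of failing under
	 * memory pressure; the pool lock is dropped first for that reason. */
	spin_unlock_irqrestore(&pool->lock, flags);
	page = pool_alloc_page(pool, mem_flags);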
 drivers/base/dma-contiguous.c  | 24
 include/linux/dma-contiguous.h |  4
 mm/dmapool.c                   | 31
 3 files changed, 19 insertions(+), 40 deletions(-)
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 612afcc5a938..0ca54421ce97 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -57,8 +57,8 @@ struct cma *dma_contiguous_default_area;
  * Users, who want to set the size of global CMA area for their system
  * should use cma= kernel parameter.
  */
-static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
-static long size_cmdline = -1;
+static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+static phys_addr_t size_cmdline = -1;
 
 static int __init early_cma(char *p)
 {
@@ -70,7 +70,7 @@ early_param("cma", early_cma);
 
 #ifdef CONFIG_CMA_SIZE_PERCENTAGE
 
-static unsigned long __init __maybe_unused cma_early_percent_memory(void)
+static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
 {
 	struct memblock_region *reg;
 	unsigned long total_pages = 0;
@@ -88,7 +88,7 @@ static unsigned long __init __maybe_unused cma_early_percent_memory(void)
 
 #else
 
-static inline __maybe_unused unsigned long cma_early_percent_memory(void)
+static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
 {
 	return 0;
 }
@@ -106,7 +106,7 @@ static inline __maybe_unused unsigned long cma_early_percent_memory(void)
  */
 void __init dma_contiguous_reserve(phys_addr_t limit)
 {
-	unsigned long selected_size = 0;
+	phys_addr_t selected_size = 0;
 
 	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
 
@@ -126,7 +126,7 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 
 	if (selected_size) {
 		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
-			 selected_size / SZ_1M);
+			 (unsigned long)selected_size / SZ_1M);
 
 		dma_declare_contiguous(NULL, selected_size, 0, limit);
 	}
@@ -227,11 +227,11 @@ core_initcall(cma_init_reserved_areas);
  * called by board specific code when early allocator (memblock or bootmem)
  * is still activate.
  */
-int __init dma_declare_contiguous(struct device *dev, unsigned long size,
+int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
 				  phys_addr_t base, phys_addr_t limit)
 {
 	struct cma_reserved *r = &cma_reserved[cma_reserved_count];
-	unsigned long alignment;
+	phys_addr_t alignment;
 
 	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
 		 (unsigned long)size, (unsigned long)base,
@@ -268,10 +268,6 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size,
 	if (!addr) {
 		base = -ENOMEM;
 		goto err;
-	} else if (addr + size > ~(unsigned long)0) {
-		memblock_free(addr, size);
-		base = -EINVAL;
-		goto err;
 	} else {
 		base = addr;
 	}
@@ -285,14 +281,14 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size,
 	r->size = size;
 	r->dev = dev;
 	cma_reserved_count++;
-	pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
+	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
 		(unsigned long)base);
 
 	/* Architecture specific contiguous memory fixup. */
 	dma_contiguous_early_fixup(base, size);
 	return 0;
 err:
-	pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
+	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
 	return base;
 }
 
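The type change matters on LPAE systems: with CONFIG_PHYS_ADDR_T_64BIT set,
a 32-bit ARM kernel has a 64-bit phys_addr_t while unsigned long remains
32 bits, so a physical address at or above 4 GiB would be silently
truncated. A small illustrative sketch, not part of the patch:

	phys_addr_t base = 0x100000000ULL;	/* 4 GiB, valid under LPAE */
	unsigned long clipped = base;		/* silently truncates to 0 */

The explicit (unsigned long) casts added to the pr_info()/pr_err() calls
above exist only to keep the %ld/%lx format specifiers correct when
printing the values.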
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index 2f303e4b7ed3..01b5c84be828 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -68,7 +68,7 @@ struct device;
 extern struct cma *dma_contiguous_default_area;
 
 void dma_contiguous_reserve(phys_addr_t addr_limit);
-int dma_declare_contiguous(struct device *dev, unsigned long size,
+int dma_declare_contiguous(struct device *dev, phys_addr_t size,
 			   phys_addr_t base, phys_addr_t limit);
 
 struct page *dma_alloc_from_contiguous(struct device *dev, int count,
@@ -83,7 +83,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 static inline void dma_contiguous_reserve(phys_addr_t limit) { }
 
 static inline
-int dma_declare_contiguous(struct device *dev, unsigned long size,
+int dma_declare_contiguous(struct device *dev, phys_addr_t size,
 			   phys_addr_t base, phys_addr_t limit)
 {
 	return -ENOSYS;
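The prototype change is source-compatible for existing callers, which can
now pass sizes and base addresses wider than 32 bits. A hypothetical call
site (the device name is made up; passing 0 for base and limit appears to
let memblock choose any accessible range, per the allocation path above):

	/* Reserve 64 MiB of contiguous DMA memory for a capture device. */
	dma_declare_contiguous(&camera_pdev.dev, 64 * SZ_1M, 0, 0);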
diff --git a/mm/dmapool.c b/mm/dmapool.c
index c5ab33bca0a8..da1b0f0b8709 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -50,7 +50,6 @@ struct dma_pool { /* the pool */
 	size_t allocation;
 	size_t boundary;
 	char name[32];
-	wait_queue_head_t waitq;
 	struct list_head pools;
 };
 
@@ -62,8 +61,6 @@ struct dma_page { /* cacheable header for 'allocation' bytes */
 	unsigned int offset;
 };
 
-#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
-
 static DEFINE_MUTEX(pools_lock);
 
 static ssize_t
@@ -172,7 +169,6 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 	retval->size = size;
 	retval->boundary = boundary;
 	retval->allocation = allocation;
-	init_waitqueue_head(&retval->waitq);
 
 	if (dev) {
 		int ret;
@@ -227,7 +223,6 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
 		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
 #endif
 		pool_initialise_page(pool, page);
-		list_add(&page->page_list, &pool->page_list);
 		page->in_use = 0;
 		page->offset = 0;
 	} else {
@@ -315,30 +310,21 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 	might_sleep_if(mem_flags & __GFP_WAIT);
 
 	spin_lock_irqsave(&pool->lock, flags);
- restart:
 	list_for_each_entry(page, &pool->page_list, page_list) {
 		if (page->offset < pool->allocation)
 			goto ready;
 	}
-	page = pool_alloc_page(pool, GFP_ATOMIC);
-	if (!page) {
-		if (mem_flags & __GFP_WAIT) {
-			DECLARE_WAITQUEUE(wait, current);
 
-			__set_current_state(TASK_UNINTERRUPTIBLE);
-			__add_wait_queue(&pool->waitq, &wait);
-			spin_unlock_irqrestore(&pool->lock, flags);
+	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
+	spin_unlock_irqrestore(&pool->lock, flags);
 
-			schedule_timeout(POOL_TIMEOUT_JIFFIES);
+	page = pool_alloc_page(pool, mem_flags);
+	if (!page)
+		return NULL;
 
-			spin_lock_irqsave(&pool->lock, flags);
-			__remove_wait_queue(&pool->waitq, &wait);
-			goto restart;
-		}
-		retval = NULL;
-		goto done;
-	}
+	spin_lock_irqsave(&pool->lock, flags);
 
+	list_add(&page->page_list, &pool->page_list);
 ready:
 	page->in_use++;
 	offset = page->offset;
@@ -348,7 +334,6 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 #ifdef DMAPOOL_DEBUG
 	memset(retval, POOL_POISON_ALLOCATED, pool->size);
 #endif
-done:
 	spin_unlock_irqrestore(&pool->lock, flags);
 	return retval;
 }
@@ -435,8 +420,6 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 	page->in_use--;
 	*(int *)vaddr = page->offset;
 	page->offset = offset;
-	if (waitqueue_active(&pool->waitq))
-		wake_up_locked(&pool->waitq);
 	/*
 	 * Resist a temptation to do
 	 * if (!is_page_busy(page)) pool_free_page(pool, page);
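With the waitqueue retry loop gone, a driver simply gets the semantics of
the gfp flags it passes in. A hypothetical usage sketch (assumes a valid
struct device *dev; not part of the patch):

	struct dma_pool *pool;
	dma_addr_t handle;
	void *buf;

	pool = dma_pool_create("demo", dev, 256, 32, 0);
	if (!pool)
		return -ENOMEM;

	/* GFP_KERNEL is now honoured all the way down to
	 * dma_alloc_coherent(), so this call may sleep and reclaim
	 * instead of spuriously failing under memory pressure. */
	buf = dma_pool_alloc(pool, GFP_KERNEL, &handle);
	if (!buf) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	dma_pool_free(pool, buf, handle);
	dma_pool_destroy(pool);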