author     Marek Szyprowski <m.szyprowski@samsung.com>  2012-11-07 09:37:07 -0500
committer  Marek Szyprowski <m.szyprowski@samsung.com>  2012-12-11 03:28:08 -0500
commit     387870f2d6d679746020fa8e25ef786ff338dc98
tree       3faebf54631b06febae94f7d626370e8250d9526  /mm/dmapool.c
parent     29594404d7fe73cd80eaa4ee8c43dcc53970c60e
mm: dmapool: use provided gfp flags for all dma_alloc_coherent() calls
dmapool always calls dma_alloc_coherent() with the GFP_ATOMIC flag, regardless of the flags provided by the caller. This causes excessive pruning of emergency memory pools without any good reason. Additionally, on the ARM architecture any driver which uses dmapools will sooner or later trigger the following error: "ERROR: 256 KiB atomic DMA coherent pool is too small! Please increase it with coherent_pool= kernel parameter!". Increasing the coherent pool size usually doesn't help much and only delays such an error, because all GFP_ATOMIC DMA allocations are always served from the special, very limited memory pool.

This patch changes the dmapool code to correctly use the gfp flags provided by the dmapool caller.

Reported-by: Soeren Moch <smoch@web.de>
Reported-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Andrew Lunn <andrew@lunn.ch>
Tested-by: Soeren Moch <smoch@web.de>
Cc: stable@vger.kernel.org
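For illustration only (not part of the original patch): a minimal driver-side sketch of the allocation path this change affects. The pool name, sizes, and helper below are hypothetical; the point is that, after this patch, the GFP_KERNEL passed to dma_pool_alloc() is forwarded to dma_alloc_coherent() for the backing page instead of being silently replaced by GFP_ATOMIC, so sleeping allocations no longer drain the atomic coherent pool.

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>

/*
 * Hypothetical helper: allocate one descriptor from a per-device pool.
 * Called from process context, so a sleeping allocation is allowed.
 */
static void *example_alloc_desc(struct device *dev, struct dma_pool **poolp,
				dma_addr_t *dma)
{
	void *desc;

	/* 64-byte descriptors, 64-byte aligned, no boundary restriction */
	*poolp = dma_pool_create("example-desc", dev, 64, 64, 0);
	if (!*poolp)
		return NULL;

	/*
	 * Before this patch the GFP_KERNEL below was ignored and any new
	 * backing page came from dma_alloc_coherent(..., GFP_ATOMIC);
	 * with this patch the caller's flags are used for that allocation.
	 */
	desc = dma_pool_alloc(*poolp, GFP_KERNEL, dma);
	if (!desc) {
		dma_pool_destroy(*poolp);
		*poolp = NULL;
	}
	return desc;
}

A caller that really is in atomic context would still pass GFP_ATOMIC and keep the old behaviour; only callers that can sleep benefit from the change.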
Diffstat (limited to 'mm/dmapool.c')
-rw-r--r--  mm/dmapool.c | 31
1 file changed, 7 insertions(+), 24 deletions(-)
diff --git a/mm/dmapool.c b/mm/dmapool.c
index c5ab33bca0a8..da1b0f0b8709 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -50,7 +50,6 @@ struct dma_pool { /* the pool */
 	size_t allocation;
 	size_t boundary;
 	char name[32];
-	wait_queue_head_t waitq;
 	struct list_head pools;
 };
 
@@ -62,8 +61,6 @@ struct dma_page { /* cacheable header for 'allocation' bytes */
 	unsigned int offset;
 };
 
-#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
-
 static DEFINE_MUTEX(pools_lock);
 
 static ssize_t
@@ -172,7 +169,6 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 	retval->size = size;
 	retval->boundary = boundary;
 	retval->allocation = allocation;
-	init_waitqueue_head(&retval->waitq);
 
 	if (dev) {
 		int ret;
@@ -227,7 +223,6 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
 		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
 #endif
 		pool_initialise_page(pool, page);
-		list_add(&page->page_list, &pool->page_list);
 		page->in_use = 0;
 		page->offset = 0;
 	} else {
@@ -315,30 +310,21 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 	might_sleep_if(mem_flags & __GFP_WAIT);
 
 	spin_lock_irqsave(&pool->lock, flags);
- restart:
 	list_for_each_entry(page, &pool->page_list, page_list) {
 		if (page->offset < pool->allocation)
 			goto ready;
 	}
-	page = pool_alloc_page(pool, GFP_ATOMIC);
-	if (!page) {
-		if (mem_flags & __GFP_WAIT) {
-			DECLARE_WAITQUEUE(wait, current);
 
-			__set_current_state(TASK_UNINTERRUPTIBLE);
-			__add_wait_queue(&pool->waitq, &wait);
-			spin_unlock_irqrestore(&pool->lock, flags);
+	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
+	spin_unlock_irqrestore(&pool->lock, flags);
 
-			schedule_timeout(POOL_TIMEOUT_JIFFIES);
+	page = pool_alloc_page(pool, mem_flags);
+	if (!page)
+		return NULL;
 
-			spin_lock_irqsave(&pool->lock, flags);
-			__remove_wait_queue(&pool->waitq, &wait);
-			goto restart;
-		}
-		retval = NULL;
-		goto done;
-	}
+	spin_lock_irqsave(&pool->lock, flags);
 
+	list_add(&page->page_list, &pool->page_list);
  ready:
 	page->in_use++;
 	offset = page->offset;
@@ -348,7 +334,6 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 #ifdef DMAPOOL_DEBUG
 	memset(retval, POOL_POISON_ALLOCATED, pool->size);
 #endif
- done:
 	spin_unlock_irqrestore(&pool->lock, flags);
 	return retval;
 }
@@ -435,8 +420,6 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 	page->in_use--;
 	*(int *)vaddr = page->offset;
 	page->offset = offset;
-	if (waitqueue_active(&pool->waitq))
-		wake_up_locked(&pool->waitq);
 	/*
 	 * Resist a temptation to do
 	 * if (!is_page_busy(page)) pool_free_page(pool, page);