about summary refs log tree commit diff stats
path: root/sound/core/memalloc.c
diff options
context:
space:
mode:
Diffstat (limited to 'sound/core/memalloc.c')
-rw-r--r--  sound/core/memalloc.c  56
1 files changed, 14 insertions, 42 deletions
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 19b3dcbb09c2..3fc6f97075ed 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -31,7 +31,7 @@
31#include <asm/uaccess.h> 31#include <asm/uaccess.h>
32#include <linux/dma-mapping.h> 32#include <linux/dma-mapping.h>
33#include <linux/moduleparam.h> 33#include <linux/moduleparam.h>
34#include <asm/semaphore.h> 34#include <linux/mutex.h>
35#include <sound/memalloc.h> 35#include <sound/memalloc.h>
36#ifdef CONFIG_SBUS 36#ifdef CONFIG_SBUS
37#include <asm/sbus.h> 37#include <asm/sbus.h>
@@ -54,7 +54,7 @@ int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab);
54/* 54/*
55 */ 55 */
56 56
57static DECLARE_MUTEX(list_mutex); 57static DEFINE_MUTEX(list_mutex);
58static LIST_HEAD(mem_list_head); 58static LIST_HEAD(mem_list_head);
59 59
60/* buffer preservation list */ 60/* buffer preservation list */
@@ -83,7 +83,7 @@ struct snd_mem_list {
83 * Hacks 83 * Hacks
84 */ 84 */
85 85
86#if defined(__i386__) || defined(__ppc__) || defined(__x86_64__) 86#if defined(__i386__)
87/* 87/*
88 * A hack to allocate large buffers via dma_alloc_coherent() 88 * A hack to allocate large buffers via dma_alloc_coherent()
89 * 89 *
@@ -141,10 +141,6 @@ static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size,
141 141
142#endif /* arch */ 142#endif /* arch */
143 143
144#if ! defined(__arm__)
145#define NEED_RESERVE_PAGES
146#endif
147
148/* 144/*
149 * 145 *
150 * Generic memory allocators 146 * Generic memory allocators
@@ -163,20 +159,6 @@ static inline void dec_snd_pages(int order)
163 snd_allocated_pages -= 1 << order; 159 snd_allocated_pages -= 1 << order;
164} 160}
165 161
166static void mark_pages(struct page *page, int order)
167{
168 struct page *last_page = page + (1 << order);
169 while (page < last_page)
170 SetPageReserved(page++);
171}
172
173static void unmark_pages(struct page *page, int order)
174{
175 struct page *last_page = page + (1 << order);
176 while (page < last_page)
177 ClearPageReserved(page++);
178}
179
180/** 162/**
181 * snd_malloc_pages - allocate pages with the given size 163 * snd_malloc_pages - allocate pages with the given size
182 * @size: the size to allocate in bytes 164 * @size: the size to allocate in bytes
@@ -195,10 +177,8 @@ void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
195 snd_assert(gfp_flags != 0, return NULL); 177 snd_assert(gfp_flags != 0, return NULL);
196 gfp_flags |= __GFP_COMP; /* compound page lets parts be mapped */ 178 gfp_flags |= __GFP_COMP; /* compound page lets parts be mapped */
197 pg = get_order(size); 179 pg = get_order(size);
198 if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL) { 180 if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
199 mark_pages(virt_to_page(res), pg);
200 inc_snd_pages(pg); 181 inc_snd_pages(pg);
201 }
202 return res; 182 return res;
203} 183}
204 184
@@ -217,7 +197,6 @@ void snd_free_pages(void *ptr, size_t size)
217 return; 197 return;
218 pg = get_order(size); 198 pg = get_order(size);
219 dec_snd_pages(pg); 199 dec_snd_pages(pg);
220 unmark_pages(virt_to_page(ptr), pg);
221 free_pages((unsigned long) ptr, pg); 200 free_pages((unsigned long) ptr, pg);
222} 201}
223 202
@@ -242,12 +221,8 @@ static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *d
242 | __GFP_NORETRY /* don't trigger OOM-killer */ 221 | __GFP_NORETRY /* don't trigger OOM-killer */
243 | __GFP_NOWARN; /* no stack trace print - this call is non-critical */ 222 | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
244 res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags); 223 res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
245 if (res != NULL) { 224 if (res != NULL)
246#ifdef NEED_RESERVE_PAGES
247 mark_pages(virt_to_page(res), pg); /* should be dma_to_page() */
248#endif
249 inc_snd_pages(pg); 225 inc_snd_pages(pg);
250 }
251 226
252 return res; 227 return res;
253} 228}
@@ -262,9 +237,6 @@ static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
262 return; 237 return;
263 pg = get_order(size); 238 pg = get_order(size);
264 dec_snd_pages(pg); 239 dec_snd_pages(pg);
265#ifdef NEED_RESERVE_PAGES
266 unmark_pages(virt_to_page(ptr), pg); /* should be dma_to_page() */
267#endif
268 dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma); 240 dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
269} 241}
270 242
@@ -440,7 +412,7 @@ size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
440 412
441 snd_assert(dmab, return 0); 413 snd_assert(dmab, return 0);
442 414
443 down(&list_mutex); 415 mutex_lock(&list_mutex);
444 list_for_each(p, &mem_list_head) { 416 list_for_each(p, &mem_list_head) {
445 mem = list_entry(p, struct snd_mem_list, list); 417 mem = list_entry(p, struct snd_mem_list, list);
446 if (mem->id == id && 418 if (mem->id == id &&
@@ -452,11 +424,11 @@ size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
452 if (dmab->dev.dev == NULL) 424 if (dmab->dev.dev == NULL)
453 dmab->dev.dev = dev; 425 dmab->dev.dev = dev;
454 kfree(mem); 426 kfree(mem);
455 up(&list_mutex); 427 mutex_unlock(&list_mutex);
456 return dmab->bytes; 428 return dmab->bytes;
457 } 429 }
458 } 430 }
459 up(&list_mutex); 431 mutex_unlock(&list_mutex);
460 return 0; 432 return 0;
461} 433}
462 434
@@ -477,11 +449,11 @@ int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
477 mem = kmalloc(sizeof(*mem), GFP_KERNEL); 449 mem = kmalloc(sizeof(*mem), GFP_KERNEL);
478 if (! mem) 450 if (! mem)
479 return -ENOMEM; 451 return -ENOMEM;
480 down(&list_mutex); 452 mutex_lock(&list_mutex);
481 mem->buffer = *dmab; 453 mem->buffer = *dmab;
482 mem->id = id; 454 mem->id = id;
483 list_add_tail(&mem->list, &mem_list_head); 455 list_add_tail(&mem->list, &mem_list_head);
484 up(&list_mutex); 456 mutex_unlock(&list_mutex);
485 return 0; 457 return 0;
486} 458}
487 459
@@ -493,7 +465,7 @@ static void free_all_reserved_pages(void)
493 struct list_head *p; 465 struct list_head *p;
494 struct snd_mem_list *mem; 466 struct snd_mem_list *mem;
495 467
496 down(&list_mutex); 468 mutex_lock(&list_mutex);
497 while (! list_empty(&mem_list_head)) { 469 while (! list_empty(&mem_list_head)) {
498 p = mem_list_head.next; 470 p = mem_list_head.next;
499 mem = list_entry(p, struct snd_mem_list, list); 471 mem = list_entry(p, struct snd_mem_list, list);
@@ -501,7 +473,7 @@ static void free_all_reserved_pages(void)
501 snd_dma_free_pages(&mem->buffer); 473 snd_dma_free_pages(&mem->buffer);
502 kfree(mem); 474 kfree(mem);
503 } 475 }
504 up(&list_mutex); 476 mutex_unlock(&list_mutex);
505} 477}
506 478
507 479
@@ -522,7 +494,7 @@ static int snd_mem_proc_read(char *page, char **start, off_t off,
522 int devno; 494 int devno;
523 static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" }; 495 static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" };
524 496
525 down(&list_mutex); 497 mutex_lock(&list_mutex);
526 len += snprintf(page + len, count - len, 498 len += snprintf(page + len, count - len,
527 "pages : %li bytes (%li pages per %likB)\n", 499 "pages : %li bytes (%li pages per %likB)\n",
528 pages * PAGE_SIZE, pages, PAGE_SIZE / 1024); 500 pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
@@ -537,7 +509,7 @@ static int snd_mem_proc_read(char *page, char **start, off_t off,
537 " addr = 0x%lx, size = %d bytes\n", 509 " addr = 0x%lx, size = %d bytes\n",
538 (unsigned long)mem->buffer.addr, (int)mem->buffer.bytes); 510 (unsigned long)mem->buffer.addr, (int)mem->buffer.bytes);
539 } 511 }
540 up(&list_mutex); 512 mutex_unlock(&list_mutex);
541 return len; 513 return len;
542} 514}
543 515