Diffstat (limited to 'sound/core/memalloc.c')
-rw-r--r--  sound/core/memalloc.c | 309
1 file changed, 2 insertions, 307 deletions
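Context for the diff below: the commit drops the reserved-buffer list, the /proc/driver/snd-page-alloc interface and the global page accounting from memalloc.c, leaving only the generic allocators. After the change the two page helpers reduce to thin wrappers around the page allocator. The following is a minimal sketch of the resulting helpers, assembled from the kept and added lines of the hunks below; the gfp_flags sanity check is elided between hunks and is filled in here as an assumption.

    #include <linux/slab.h>
    #include <linux/mm.h>

    /* After the patch: plain wrappers, no snd_allocated_pages accounting. */
    void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
    {
            int pg;

            if (WARN_ON(!size))
                    return NULL;
            if (WARN_ON(!gfp_flags))        /* assumed context between hunks */
                    return NULL;
            gfp_flags |= __GFP_COMP;        /* compound page lets parts be mapped */
            pg = get_order(size);
            return (void *) __get_free_pages(gfp_flags, pg);
    }

    void snd_free_pages(void *ptr, size_t size)
    {
            int pg;

            if (ptr == NULL)
                    return;
            pg = get_order(size);
            free_pages((unsigned long) ptr, pg);
    }

The DMA-coherent variants are simplified the same way: snd_malloc_dev_pages() and snd_free_dev_pages() now call dma_alloc_coherent()/dma_free_coherent() directly, without the inc_snd_pages()/dec_snd_pages() bookkeeping.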
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 5e1c7bc73b29..4595f93d151e 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -21,60 +21,18 @@
  *
  */
 
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-#include <linux/init.h>
-#include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
-#include <linux/seq_file.h>
-#include <asm/uaccess.h>
 #include <linux/dma-mapping.h>
 #include <linux/genalloc.h>
-#include <linux/moduleparam.h>
-#include <linux/mutex.h>
 #include <sound/memalloc.h>
 
-
-MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>, Jaroslav Kysela <perex@perex.cz>");
-MODULE_DESCRIPTION("Memory allocator for ALSA system.");
-MODULE_LICENSE("GPL");
-
-
-/*
- */
-
-static DEFINE_MUTEX(list_mutex);
-static LIST_HEAD(mem_list_head);
-
-/* buffer preservation list */
-struct snd_mem_list {
-        struct snd_dma_buffer buffer;
-        unsigned int id;
-        struct list_head list;
-};
-
-/* id for pre-allocated buffers */
-#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1
-
 /*
  *
  * Generic memory allocators
  *
  */
 
-static long snd_allocated_pages; /* holding the number of allocated pages */
-
-static inline void inc_snd_pages(int order)
-{
-        snd_allocated_pages += 1 << order;
-}
-
-static inline void dec_snd_pages(int order)
-{
-        snd_allocated_pages -= 1 << order;
-}
-
 /**
  * snd_malloc_pages - allocate pages with the given size
  * @size: the size to allocate in bytes
@@ -87,7 +45,6 @@ static inline void dec_snd_pages(int order)
 void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
 {
         int pg;
-        void *res;
 
         if (WARN_ON(!size))
                 return NULL;
@@ -95,9 +52,7 @@ void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
                 return NULL;
         gfp_flags |= __GFP_COMP; /* compound page lets parts be mapped */
         pg = get_order(size);
-        if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
-                inc_snd_pages(pg);
-        return res;
+        return (void *) __get_free_pages(gfp_flags, pg);
 }
 
 /**
@@ -114,7 +69,6 @@ void snd_free_pages(void *ptr, size_t size)
         if (ptr == NULL)
                 return;
         pg = get_order(size);
-        dec_snd_pages(pg);
         free_pages((unsigned long) ptr, pg);
 }
 
@@ -129,7 +83,6 @@ void snd_free_pages(void *ptr, size_t size)
 static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
 {
         int pg;
-        void *res;
         gfp_t gfp_flags;
 
         if (WARN_ON(!dma))
@@ -139,11 +92,7 @@ static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *d
                 | __GFP_COMP    /* compound page lets parts be mapped */
                 | __GFP_NORETRY /* don't trigger OOM-killer */
                 | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
-        res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
-        if (res != NULL)
-                inc_snd_pages(pg);
-
-        return res;
+        return dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
 }
 
 /* free the coherent DMA pages */
@@ -155,7 +104,6 @@ static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
         if (ptr == NULL)
                 return;
         pg = get_order(size);
-        dec_snd_pages(pg);
         dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
 }
 
@@ -340,256 +288,6 @@ void snd_dma_free_pages(struct snd_dma_buffer *dmab)
         }
 }
 
-
-/**
- * snd_dma_get_reserved - get the reserved buffer for the given device
- * @dmab: the buffer allocation record to store
- * @id: the buffer id
- *
- * Looks for the reserved-buffer list and re-uses if the same buffer
- * is found in the list. When the buffer is found, it's removed from the free list.
- *
- * Return: The size of buffer if the buffer is found, or zero if not found.
- */
-size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
-{
-        struct snd_mem_list *mem;
-
-        if (WARN_ON(!dmab))
-                return 0;
-
-        mutex_lock(&list_mutex);
-        list_for_each_entry(mem, &mem_list_head, list) {
-                if (mem->id == id &&
-                    (mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL ||
-                     ! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) {
-                        struct device *dev = dmab->dev.dev;
-                        list_del(&mem->list);
-                        *dmab = mem->buffer;
-                        if (dmab->dev.dev == NULL)
-                                dmab->dev.dev = dev;
-                        kfree(mem);
-                        mutex_unlock(&list_mutex);
-                        return dmab->bytes;
-                }
-        }
-        mutex_unlock(&list_mutex);
-        return 0;
-}
-
-/**
- * snd_dma_reserve_buf - reserve the buffer
- * @dmab: the buffer to reserve
- * @id: the buffer id
- *
- * Reserves the given buffer as a reserved buffer.
- *
- * Return: Zero if successful, or a negative code on error.
- */
-int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
-{
-        struct snd_mem_list *mem;
-
-        if (WARN_ON(!dmab))
-                return -EINVAL;
-        mem = kmalloc(sizeof(*mem), GFP_KERNEL);
-        if (! mem)
-                return -ENOMEM;
-        mutex_lock(&list_mutex);
-        mem->buffer = *dmab;
-        mem->id = id;
-        list_add_tail(&mem->list, &mem_list_head);
-        mutex_unlock(&list_mutex);
-        return 0;
-}
-
-/*
- * purge all reserved buffers
- */
-static void free_all_reserved_pages(void)
-{
-        struct list_head *p;
-        struct snd_mem_list *mem;
-
-        mutex_lock(&list_mutex);
-        while (! list_empty(&mem_list_head)) {
-                p = mem_list_head.next;
-                mem = list_entry(p, struct snd_mem_list, list);
-                list_del(p);
-                snd_dma_free_pages(&mem->buffer);
-                kfree(mem);
-        }
-        mutex_unlock(&list_mutex);
-}
-
-
-#ifdef CONFIG_PROC_FS
-/*
- * proc file interface
- */
-#define SND_MEM_PROC_FILE "driver/snd-page-alloc"
-static struct proc_dir_entry *snd_mem_proc;
-
-static int snd_mem_proc_read(struct seq_file *seq, void *offset)
-{
-        long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
-        struct snd_mem_list *mem;
-        int devno;
-        static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG" };
-
-        mutex_lock(&list_mutex);
-        seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n",
-                   pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
-        devno = 0;
-        list_for_each_entry(mem, &mem_list_head, list) {
-                devno++;
-                seq_printf(seq, "buffer %d : ID %08x : type %s\n",
-                           devno, mem->id, types[mem->buffer.dev.type]);
-                seq_printf(seq, " addr = 0x%lx, size = %d bytes\n",
-                           (unsigned long)mem->buffer.addr,
-                           (int)mem->buffer.bytes);
-        }
-        mutex_unlock(&list_mutex);
-        return 0;
-}
-
-static int snd_mem_proc_open(struct inode *inode, struct file *file)
-{
-        return single_open(file, snd_mem_proc_read, NULL);
-}
-
-/* FIXME: for pci only - other bus? */
-#ifdef CONFIG_PCI
-#define gettoken(bufp) strsep(bufp, " \t\n")
-
-static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
-                                  size_t count, loff_t * ppos)
-{
-        char buf[128];
-        char *token, *p;
-
-        if (count > sizeof(buf) - 1)
-                return -EINVAL;
-        if (copy_from_user(buf, buffer, count))
-                return -EFAULT;
-        buf[count] = '\0';
-
-        p = buf;
-        token = gettoken(&p);
-        if (! token || *token == '#')
-                return count;
-        if (strcmp(token, "add") == 0) {
-                char *endp;
-                int vendor, device, size, buffers;
-                long mask;
-                int i, alloced;
-                struct pci_dev *pci;
-
-                if ((token = gettoken(&p)) == NULL ||
-                    (vendor = simple_strtol(token, NULL, 0)) <= 0 ||
-                    (token = gettoken(&p)) == NULL ||
-                    (device = simple_strtol(token, NULL, 0)) <= 0 ||
-                    (token = gettoken(&p)) == NULL ||
-                    (mask = simple_strtol(token, NULL, 0)) < 0 ||
-                    (token = gettoken(&p)) == NULL ||
-                    (size = memparse(token, &endp)) < 64*1024 ||
-                    size > 16*1024*1024 /* too big */ ||
-                    (token = gettoken(&p)) == NULL ||
-                    (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
-                    buffers > 4) {
-                        printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
-                        return count;
-                }
-                vendor &= 0xffff;
-                device &= 0xffff;
-
-                alloced = 0;
-                pci = NULL;
-                while ((pci = pci_get_device(vendor, device, pci)) != NULL) {
-                        if (mask > 0 && mask < 0xffffffff) {
-                                if (pci_set_dma_mask(pci, mask) < 0 ||
-                                    pci_set_consistent_dma_mask(pci, mask) < 0) {
-                                        printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
-                                        pci_dev_put(pci);
-                                        return count;
-                                }
-                        }
-                        for (i = 0; i < buffers; i++) {
-                                struct snd_dma_buffer dmab;
-                                memset(&dmab, 0, sizeof(dmab));
-                                if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
-                                                        size, &dmab) < 0) {
-                                        printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
-                                        pci_dev_put(pci);
-                                        return count;
-                                }
-                                snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
-                        }
-                        alloced++;
-                }
-                if (! alloced) {
-                        for (i = 0; i < buffers; i++) {
-                                struct snd_dma_buffer dmab;
-                                memset(&dmab, 0, sizeof(dmab));
-                                /* FIXME: We can allocate only in ZONE_DMA
-                                 * without a device pointer!
-                                 */
-                                if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
-                                                        size, &dmab) < 0) {
-                                        printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
-                                        break;
-                                }
-                                snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
-                        }
-                }
-        } else if (strcmp(token, "erase") == 0)
-                /* FIXME: need for releasing each buffer chunk? */
-                free_all_reserved_pages();
-        else
-                printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
-        return count;
-}
-#endif /* CONFIG_PCI */
-
-static const struct file_operations snd_mem_proc_fops = {
-        .owner = THIS_MODULE,
-        .open = snd_mem_proc_open,
-        .read = seq_read,
-#ifdef CONFIG_PCI
-        .write = snd_mem_proc_write,
-#endif
-        .llseek = seq_lseek,
-        .release = single_release,
-};
-
-#endif /* CONFIG_PROC_FS */
-
-/*
- * module entry
- */
-
-static int __init snd_mem_init(void)
-{
-#ifdef CONFIG_PROC_FS
-        snd_mem_proc = proc_create(SND_MEM_PROC_FILE, 0644, NULL,
-                                   &snd_mem_proc_fops);
-#endif
-        return 0;
-}
-
-static void __exit snd_mem_exit(void)
-{
-        remove_proc_entry(SND_MEM_PROC_FILE, NULL);
-        free_all_reserved_pages();
-        if (snd_allocated_pages > 0)
-                printk(KERN_ERR "snd-malloc: Memory leak? pages not freed = %li\n", snd_allocated_pages);
-}
-
-
-module_init(snd_mem_init)
-module_exit(snd_mem_exit)
-
-
 /*
  * exports
  */
@@ -597,8 +295,5 @@ EXPORT_SYMBOL(snd_dma_alloc_pages);
 EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
 EXPORT_SYMBOL(snd_dma_free_pages);
 
-EXPORT_SYMBOL(snd_dma_get_reserved_buf);
-EXPORT_SYMBOL(snd_dma_reserve_buf);
-
 EXPORT_SYMBOL(snd_malloc_pages);
 EXPORT_SYMBOL(snd_free_pages);
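For reference, the two exports dropped in the last hunk were the caller-facing half of the removed preservation list. Below is a hedged sketch of how a PCI caller (or the removed proc write handler above) used that pair before this commit; example_preallocate(), pci and size are hypothetical names, and snd_dma_get_reserved_buf()/snd_dma_reserve_buf() exist only in the pre-patch tree.

    #include <linux/pci.h>
    #include <linux/string.h>
    #include <sound/memalloc.h>

    /* Pre-patch pattern: re-use a preserved buffer keyed by the PCI device id,
     * or allocate coherent DMA pages and park them on the reservation list. */
    static int example_preallocate(struct pci_dev *pci, size_t size)
    {
            struct snd_dma_buffer dmab;

            memset(&dmab, 0, sizeof(dmab));

            /* A buffer reserved earlier under this id is handed back as-is;
             * the return value is its size in bytes, or zero if none matches. */
            if (snd_dma_get_reserved_buf(&dmab, snd_dma_pci_buf_id(pci)) > 0)
                    return 0;       /* dmab now describes the preserved buffer */

            if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
                                    size, &dmab) < 0)
                    return -ENOMEM;

            /* Keep the buffer across driver reloads by putting it on the list. */
            return snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
    }

With this commit the reservation list, the proc file and both exports are gone; callers allocate with snd_dma_alloc_pages() and release with snd_dma_free_pages() directly, which remain exported above.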