Diffstat (limited to 'sound/pci/emu10k1/memory.c')
 sound/pci/emu10k1/memory.c | 101 +++++++++++++++++++++++++++++++---------
 1 file changed, 78 insertions(+), 23 deletions(-)
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
index 4f1f69be1865..5865f3b90b34 100644
--- a/sound/pci/emu10k1/memory.c
+++ b/sound/pci/emu10k1/memory.c
@@ -34,7 +34,10 @@
  * aligned pages in others
  */
 #define __set_ptb_entry(emu,page,addr) \
-	(((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
+	(((__le32 *)(emu)->ptb_pages.area)[page] = \
+	cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
+#define __get_ptb_entry(emu, page) \
+	(le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
 
 #define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
 #define MAX_ALIGN_PAGES0	(MAXPAGES0 / UNIT_PAGES)
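A note on the entry layout that the new __get_ptb_entry() reverses: the DMA address is EMUPAGESIZE-aligned, so its low bits are free to carry the page index, and address_mode shifts the address up one bit on chips with the larger page table. A standalone round-trip of the encoding (a sketch, not driver code; the cpu_to_le32()/le32_to_cpu() conversion is elided and a 4 KiB EMUPAGESIZE is assumed):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            const unsigned int m = 1;                /* emu->address_mode */
            const uint32_t pgmask = (1u << (12 + m)) - 1;
            uint32_t addr = 0x1234000;               /* 4 KiB aligned DMA address */
            uint32_t page = 0x7ff;                   /* index of this PTB entry */

            uint32_t entry = (addr << m) | page;     /* what __set_ptb_entry() stores */

            assert((entry & pgmask) == page);        /* index recovered */
            assert(((entry & ~pgmask) >> m) == addr);/* address recovered */
            return 0;
    }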
@@ -44,8 +47,7 @@
 /* get offset address from aligned page */
 #define aligned_page_offset(page)	((page) << PAGE_SHIFT)
 
-#if PAGE_SIZE == 4096
-/* page size == EMUPAGESIZE */
+#if PAGE_SIZE == EMUPAGESIZE && !IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
 /* fill PTB entrie(s) corresponding to page with addr */
 #define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
 /* fill PTB entrie(s) corresponding to page with silence pointer */
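The second half of the new #if condition is worth spelling out: when CONFIG_DYNAMIC_DEBUG is enabled, dev_dbg() is compiled in regardless of DEBUG, so the condition appears intended to force the out-of-line helpers, which carry the new dev_dbg() calls in the hunks below, even when PAGE_SIZE already equals EMUPAGESIZE; the bare macro aliases remain only for the no-debug, 1:1 page case. Condensed, the resulting dispatch looks roughly like this (a sketch of the surrounding file, not the verbatim source):

    #if PAGE_SIZE == EMUPAGESIZE && !IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
    /* 1:1 pages and no dynamic debug: plain aliases, no loop, no printouts */
    #define set_ptb_entry(emu, page, addr)	__set_ptb_entry(emu, page, addr)
    #define set_silent_ptb(emu, page)	__set_ptb_entry(emu, page, (emu)->silent_page.addr)
    #else
    /* split pages and/or dev_dbg() wanted: the inline helpers below are used */
    static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr);
    static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page);
    #endif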
@@ -58,6 +60,8 @@ static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t a
 	page *= UNIT_PAGES;
 	for (i = 0; i < UNIT_PAGES; i++, page++) {
 		__set_ptb_entry(emu, page, addr);
+		dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page,
+			(unsigned int)__get_ptb_entry(emu, page));
 		addr += EMUPAGESIZE;
 	}
 }
@@ -65,9 +69,12 @@ static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
 {
 	int i;
 	page *= UNIT_PAGES;
-	for (i = 0; i < UNIT_PAGES; i++, page++)
+	for (i = 0; i < UNIT_PAGES; i++, page++) {
 		/* do not increment ptr */
 		__set_ptb_entry(emu, page, emu->silent_page.addr);
+		dev_dbg(emu->card->dev, "mapped silent page %d to entry %.8x\n",
+			page, (unsigned int)__get_ptb_entry(emu, page));
+	}
 }
 #endif /* PAGE_SIZE */
 
@@ -102,7 +109,7 @@ static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
  */
 static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
 {
-	int page = 0, found_page = -ENOMEM;
+	int page = 1, found_page = -ENOMEM;
 	int max_size = npages;
 	int size;
 	struct list_head *candidate = &emu->mapped_link_head;
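For orientation: search_empty_map_area() walks the ordered list of mapped blocks looking for a hole of at least npages entries (the found_page/max_size/candidate variables above track the best candidate), and the one-character change makes the walk start at entry 1, so PTB entry 0 is never handed out; the next two hunks enforce the same reservation in map_memblk() and unmap_memblk(). A deliberately simplified first-fit model of the scan (standalone sketch with a made-up layout; the in-tree candidate bookkeeping is omitted):

    #include <stdio.h>

    struct mapped { int start, pages; };    /* sorted by start */

    static int find_gap(const struct mapped *blocks, int nblocks,
                        int total_pages, int npages)
    {
            int page = 1;   /* entry 0 is reserved, as in the patch */

            for (int i = 0; i < nblocks; i++) {
                    if (blocks[i].start - page >= npages)
                            return page;    /* hole before this block fits */
                    page = blocks[i].start + blocks[i].pages;
            }
            return total_pages - page >= npages ? page : -1; /* tail hole */
    }

    int main(void)
    {
            const struct mapped blocks[] = { { 1, 4 }, { 9, 2 } };

            /* finds the 4-entry hole at 5..8; entry 0 is never offered */
            printf("gap at %d\n", find_gap(blocks, 2, 32, 4));
            return 0;
    }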
@@ -147,6 +154,10 @@ static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
 	page = search_empty_map_area(emu, blk->pages, &next);
 	if (page < 0) /* not found */
 		return page;
+	if (page == 0) {
+		dev_err(emu->card->dev, "trying to map zero (reserved) page\n");
+		return -EINVAL;
+	}
 	/* insert this block in the proper position of mapped list */
 	list_add_tail(&blk->mapped_link, next);
 	/* append this as a newest block in order list */
@@ -177,7 +188,7 @@ static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
 		q = get_emu10k1_memblk(p, mapped_link);
 		start_page = q->mapped_page + q->pages;
 	} else
-		start_page = 0;
+		start_page = 1;
 	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
 		q = get_emu10k1_memblk(p, mapped_link);
 		end_page = q->mapped_page;
@@ -366,6 +377,33 @@ int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
 	return snd_emu10k1_synth_free(emu, blk);
 }
 
+/*
+ * allocate DMA pages, widening the allocation if necessary
+ *
+ * See the comment above snd_emu10k1_detect_iommu() in emu10k1_main.c for why
+ * this might be needed.
+ *
+ * If you modify this function, check whether __synth_free_pages() also needs
+ * changes.
+ */
+int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
+					struct snd_dma_buffer *dmab)
+{
+	if (emu->iommu_workaround) {
+		size_t npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+		size_t size_real = npages * PAGE_SIZE;
+
+		/*
+		 * The device has been observed to access up to 256 extra
+		 * bytes, but use 1k to be safe.
+		 */
+		if (size_real < size + 1024)
+			size += PAGE_SIZE;
+	}
+
+	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
+				   snd_dma_pci_data(emu->pci), size, dmab);
+}
 
 /*
  * memory allocation using multiple pages (for synth)
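The widening rule is easy to sanity-check in isolation: a request that is an exact multiple of the page size has no slack at the end (size_real == size), so it gains a whole extra page, while a request that already leaves at least 1 KiB free in its last page is returned unchanged. A minimal model, assuming 4 KiB pages (maybe_widen() and MY_PAGE_SIZE are made-up names for this sketch):

    #include <stddef.h>
    #include <stdio.h>

    #define MY_PAGE_SIZE 4096       /* assumed PAGE_SIZE for the example */

    static size_t maybe_widen(size_t size)
    {
            size_t npages = (size + MY_PAGE_SIZE - 1) / MY_PAGE_SIZE;
            size_t size_real = npages * MY_PAGE_SIZE;

            if (size_real < size + 1024)    /* under 1k of slack at the end */
                    size += MY_PAGE_SIZE;
            return size;
    }

    int main(void)
    {
            printf("%zu -> %zu\n", (size_t)4096, maybe_widen(4096)); /* 4096 -> 8192 */
            printf("%zu -> %zu\n", (size_t)3000, maybe_widen(3000)); /* 3000 -> 3000 */
            return 0;
    }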
@@ -450,10 +488,27 @@ static void get_single_page_range(struct snd_util_memhdr *hdr,
 static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
 			       int last_page)
 {
+	struct snd_dma_buffer dmab;
 	int page;
 
+	dmab.dev.type = SNDRV_DMA_TYPE_DEV;
+	dmab.dev.dev = snd_dma_pci_data(emu->pci);
+
 	for (page = first_page; page <= last_page; page++) {
-		free_page((unsigned long)emu->page_ptr_table[page]);
+		if (emu->page_ptr_table[page] == NULL)
+			continue;
+		dmab.area = emu->page_ptr_table[page];
+		dmab.addr = emu->page_addr_table[page];
+
+		/*
+		 * please keep me in sync with logic in
+		 * snd_emu10k1_alloc_pages_maybe_wider()
+		 */
+		dmab.bytes = PAGE_SIZE;
+		if (emu->iommu_workaround)
+			dmab.bytes *= 2;
+
+		snd_dma_free_pages(&dmab);
 		emu->page_addr_table[page] = 0;
 		emu->page_ptr_table[page] = NULL;
 	}
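The hard-coded doubling is consistent with the allocation path: as the synth_alloc_pages() hunk below shows, synth pages are always requested with size == PAGE_SIZE, and for that size the slack test in snd_emu10k1_alloc_pages_maybe_wider() always fires, so each such allocation really spans two pages whenever the workaround is active. In terms of the maybe_widen() sketch above:

    assert(maybe_widen(MY_PAGE_SIZE) == 2 * MY_PAGE_SIZE);  /* matches dmab.bytes *= 2 */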
@@ -465,30 +520,30 @@ static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
 static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
 {
 	int page, first_page, last_page;
+	struct snd_dma_buffer dmab;
 
 	emu10k1_memblk_init(blk);
 	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
 	/* allocate kernel pages */
 	for (page = first_page; page <= last_page; page++) {
-		/* first try to allocate from <4GB zone */
-		struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 |
-					    __GFP_NOWARN);
-		if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT))) {
-			if (p)
-				__free_page(p);
-			/* try to allocate from <16MB zone */
-			p = alloc_page(GFP_ATOMIC | GFP_DMA |
-				       __GFP_NORETRY | /* no OOM-killer */
-				       __GFP_NOWARN);
-		}
-		if (!p) {
-			__synth_free_pages(emu, first_page, page - 1);
-			return -ENOMEM;
-		}
-		emu->page_addr_table[page] = page_to_phys(p);
-		emu->page_ptr_table[page] = page_address(p);
+		if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE,
+							&dmab) < 0)
+			goto __fail;
+		if (!is_valid_page(emu, dmab.addr)) {
+			snd_dma_free_pages(&dmab);
+			goto __fail;
+		}
+		emu->page_addr_table[page] = dmab.addr;
+		emu->page_ptr_table[page] = dmab.area;
 	}
 	return 0;
+
+__fail:
+	/* release allocated pages */
+	last_page = page - 1;
+	__synth_free_pages(emu, first_page, last_page);
+
+	return -ENOMEM;
 }
 
 /*