Diffstat (limited to 'sound/core/sgbuf.c')
-rw-r--r--  sound/core/sgbuf.c | 62
1 file changed, 46 insertions, 16 deletions
diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c
index cefd228cd2a..d4564edd61d 100644
--- a/sound/core/sgbuf.c
+++ b/sound/core/sgbuf.c
@@ -41,9 +41,11 @@ int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
 	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
 	tmpb.dev.dev = sgbuf->dev;
 	for (i = 0; i < sgbuf->pages; i++) {
+		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
+			continue; /* continuous pages */
 		tmpb.area = sgbuf->table[i].buf;
-		tmpb.addr = sgbuf->table[i].addr;
-		tmpb.bytes = PAGE_SIZE;
+		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
+		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
 		snd_dma_free_pages(&tmpb);
 	}
 	if (dmab->area)
@@ -58,13 +60,17 @@ int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
 	return 0;
 }
 
+#define MAX_ALLOC_PAGES	32
+
 void *snd_malloc_sgbuf_pages(struct device *device,
 			     size_t size, struct snd_dma_buffer *dmab,
 			     size_t *res_size)
 {
 	struct snd_sg_buf *sgbuf;
-	unsigned int i, pages;
+	unsigned int i, pages, chunk, maxpages;
 	struct snd_dma_buffer tmpb;
+	struct snd_sg_page *table;
+	struct page **pgtable;
 
 	dmab->area = NULL;
 	dmab->addr = 0;
@@ -74,31 +80,55 @@ void *snd_malloc_sgbuf_pages(struct device *device,
 	sgbuf->dev = device;
 	pages = snd_sgbuf_aligned_pages(size);
 	sgbuf->tblsize = sgbuf_align_table(pages);
-	sgbuf->table = kcalloc(sgbuf->tblsize, sizeof(*sgbuf->table), GFP_KERNEL);
-	if (! sgbuf->table)
+	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
+	if (!table)
 		goto _failed;
-	sgbuf->page_table = kcalloc(sgbuf->tblsize, sizeof(*sgbuf->page_table), GFP_KERNEL);
-	if (! sgbuf->page_table)
+	sgbuf->table = table;
+	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
+	if (!pgtable)
 		goto _failed;
+	sgbuf->page_table = pgtable;
 
-	/* allocate each page */
-	for (i = 0; i < pages; i++) {
-		if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, device, PAGE_SIZE, &tmpb) < 0) {
-			if (res_size == NULL)
+	/* allocate pages */
+	maxpages = MAX_ALLOC_PAGES;
+	while (pages > 0) {
+		chunk = pages;
+		/* don't be too eager to take a huge chunk */
+		if (chunk > maxpages)
+			chunk = maxpages;
+		chunk <<= PAGE_SHIFT;
+		if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device,
+						 chunk, &tmpb) < 0) {
+			if (!sgbuf->pages)
+				return NULL;
+			if (!res_size)
 				goto _failed;
-			*res_size = size = sgbuf->pages * PAGE_SIZE;
+			size = sgbuf->pages * PAGE_SIZE;
 			break;
 		}
-		sgbuf->table[i].buf = tmpb.area;
-		sgbuf->table[i].addr = tmpb.addr;
-		sgbuf->page_table[i] = virt_to_page(tmpb.area);
-		sgbuf->pages++;
+		chunk = tmpb.bytes >> PAGE_SHIFT;
+		for (i = 0; i < chunk; i++) {
+			table->buf = tmpb.area;
+			table->addr = tmpb.addr;
+			if (!i)
+				table->addr |= chunk; /* mark head */
+			table++;
+			*pgtable++ = virt_to_page(tmpb.area);
+			tmpb.area += PAGE_SIZE;
+			tmpb.addr += PAGE_SIZE;
+		}
+		sgbuf->pages += chunk;
+		pages -= chunk;
+		if (chunk < maxpages)
+			maxpages = chunk;
 	}
 
 	sgbuf->size = size;
 	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, PAGE_KERNEL);
 	if (! dmab->area)
 		goto _failed;
+	if (res_size)
+		*res_size = sgbuf->size;
 	return dmab->area;
 
 _failed:
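
Note on the chunk bookkeeping introduced above: snd_malloc_sgbuf_pages() now grabs multi-page chunks via snd_dma_alloc_pages_fallback() (capped at MAX_ALLOC_PAGES, and lowering maxpages whenever the allocator hands back less than was asked for), and it records each chunk's size by OR-ing the page count into the low bits of the first table entry's page-aligned DMA address ("mark head"). The free path in snd_free_sgbuf_pages() then skips entries whose low bits are zero (continuation pages) and recovers the chunk size and real address from the head entry. The following stand-alone user-space sketch only illustrates that encode/decode bit trick; it assumes 4 KiB pages and a made-up address, and while the names mirror the kernel code, it is not kernel code.

/*
 * Illustrative user-space sketch of the "mark head" encoding used in
 * sound/core/sgbuf.c above.  Assumes 4 KiB pages; the address below is
 * made up.  Not kernel code.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Head entry: OR the chunk's page count into the low (sub-page) bits
 * of the chunk's page-aligned DMA address. */
static uintptr_t mark_head(uintptr_t addr, unsigned int chunk_pages)
{
	assert((addr & ~PAGE_MASK) == 0);		/* must be page aligned */
	assert(chunk_pages > 0 && chunk_pages < PAGE_SIZE);
	return addr | chunk_pages;
}

int main(void)
{
	uintptr_t head = mark_head(0x10000000, 8);	/* head of an 8-page chunk */
	uintptr_t cont = 0x10001000;			/* continuation page, low bits zero */

	/* Free path: continuation pages are skipped... */
	if (!(cont & ~PAGE_MASK))
		printf("continuation page: skipped\n");

	/* ...and a head entry yields both the real address and the chunk size. */
	if (head & ~PAGE_MASK) {
		uintptr_t addr = head & PAGE_MASK;
		size_t bytes = (head & ~PAGE_MASK) << PAGE_SHIFT;
		printf("free %zu bytes at %#lx\n", bytes, (unsigned long)addr);
	}
	return 0;
}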