author     Takashi Iwai <tiwai@suse.de>  2009-06-02 09:26:19 -0400
committer  Takashi Iwai <tiwai@suse.de>  2009-06-02 09:54:47 -0400
commit     c76157d9286ed598c241c212aa5a3c6e5107bd82 (patch)
tree       a08d4b06184bf37ee55a20e30f9c66a43cee37c2 /sound/pci/ctxfi/ctvmem.c
parent     cd391e206f486955e216a61bd9ebcb0e142122e9 (diff)
ALSA: ctxfi - Support SG-buffers
Use SG-buffers instead of contiguous pages.
Signed-off-by: Takashi Iwai <tiwai@suse.de>
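With this change, ct_vm_map() no longer translates a contiguous kmalloc'ed buffer with virt_to_phys(); it asks the ALSA core for the DMA address of each page of the (possibly scattered) PCM buffer via snd_pcm_sgbuf_get_addr(). A minimal sketch of that per-page lookup is shown below; the helper name fill_page_table() and the 4 KiB page-shift constant are illustrative assumptions, not part of this patch.

#include <linux/types.h>
#include <sound/pcm.h>

#define MY_PAGE_SHIFT   12      /* assumption: 4 KiB device pages, like CT_PAGE_SHIFT */

/*
 * Illustrative helper (not from the patch): copy the DMA address of each
 * page of an SG-backed PCM buffer into a flat page table, the way the
 * reworked ct_vm_map() fills vm->ptp[0].
 */
static void fill_page_table(struct snd_pcm_substream *substream,
                            unsigned long *ptp, unsigned int pte_start,
                            unsigned int pages)
{
        unsigned int i;

        for (i = 0; i < pages; i++) {
                /* offset is in bytes; each entry covers one device page */
                dma_addr_t addr =
                        snd_pcm_sgbuf_get_addr(substream, i << MY_PAGE_SHIFT);
                ptp[pte_start + i] = addr;
        }
}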
Diffstat (limited to 'sound/pci/ctxfi/ctvmem.c')
 -rw-r--r--  sound/pci/ctxfi/ctvmem.c | 56
 1 file changed, 22 insertions(+), 34 deletions(-)
diff --git a/sound/pci/ctxfi/ctvmem.c b/sound/pci/ctxfi/ctvmem.c
index 74a03623d047..b7f8e58ae07d 100644
--- a/sound/pci/ctxfi/ctvmem.c
+++ b/sound/pci/ctxfi/ctvmem.c
@@ -19,7 +19,7 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/io.h>
-#include <asm/pgtable.h>
+#include <sound/pcm.h>
 
 #define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
 #define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)
@@ -34,6 +34,13 @@ get_vm_block(struct ct_vm *vm, unsigned int size)
         struct ct_vm_block *block = NULL, *entry = NULL;
         struct list_head *pos = NULL;
 
+        size = CT_PAGE_ALIGN(size);
+        if (size > vm->size) {
+                printk(KERN_ERR "ctxfi: Fail! No sufficient device virtural "
+                       "memory space available!\n");
+                return NULL;
+        }
+
         mutex_lock(&vm->lock);
         list_for_each(pos, &vm->unused) {
                 entry = list_entry(pos, struct ct_vm_block, list);
@@ -73,6 +80,8 @@ static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
         struct ct_vm_block *entry = NULL, *pre_ent = NULL;
         struct list_head *pos = NULL, *pre = NULL;
 
+        block->size = CT_PAGE_ALIGN(block->size);
+
         mutex_lock(&vm->lock);
         list_del(&block->list);
         vm->size += block->size;
@@ -115,57 +124,36 @@ static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
 
 /* Map host addr (kmalloced/vmalloced) to device logical addr. */
 static struct ct_vm_block *
-ct_vm_map(struct ct_vm *vm, void *host_addr, int size)
+ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
 {
-        struct ct_vm_block *block = NULL;
-        unsigned long pte_start;
-        unsigned long i;
-        unsigned long pages;
-        unsigned long start_phys;
+        struct ct_vm_block *block;
+        unsigned int pte_start;
+        unsigned i, pages;
         unsigned long *ptp;
 
-        /* do mapping */
-        if ((unsigned long)host_addr >= VMALLOC_START) {
-                printk(KERN_ERR "ctxfi: "
-                       "Fail! Not support vmalloced addr now!\n");
-                return NULL;
-        }
-
-        if (size > vm->size) {
-                printk(KERN_ERR "ctxfi: Fail! No sufficient device virtural "
-                       "memory space available!\n");
-                return NULL;
-        }
-
-        start_phys = (virt_to_phys(host_addr) & CT_PAGE_MASK);
-        pages = (CT_PAGE_ALIGN(virt_to_phys(host_addr) + size)
-                        - start_phys) >> CT_PAGE_SHIFT;
-
-        ptp = vm->ptp[0];
-
-        block = get_vm_block(vm, (pages << CT_PAGE_SHIFT));
+        block = get_vm_block(vm, size);
         if (block == NULL) {
                 printk(KERN_ERR "ctxfi: No virtual memory block that is big "
                        "enough to allocate!\n");
                 return NULL;
         }
 
+        ptp = vm->ptp[0];
         pte_start = (block->addr >> CT_PAGE_SHIFT);
-        for (i = 0; i < pages; i++)
-                ptp[pte_start+i] = start_phys + (i << CT_PAGE_SHIFT);
+        pages = block->size >> CT_PAGE_SHIFT;
+        for (i = 0; i < pages; i++) {
+                unsigned long addr;
+                addr = snd_pcm_sgbuf_get_addr(substream, i << CT_PAGE_SHIFT);
+                ptp[pte_start + i] = addr;
+        }
 
-        block->addr += (virt_to_phys(host_addr) & (~CT_PAGE_MASK));
         block->size = size;
-
         return block;
 }
 
 static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
 {
         /* do unmapping */
-        block->size = ((block->addr + block->size + CT_PAGE_SIZE - 1)
-                       & CT_PAGE_MASK) - (block->addr & CT_PAGE_MASK);
-        block->addr &= CT_PAGE_MASK;
         put_vm_block(vm, block);
 }
 