Diffstat (limited to 'sound/pci/ctxfi/ctvmem.c')
-rw-r--r--  sound/pci/ctxfi/ctvmem.c | 77
1 file changed, 36 insertions(+), 41 deletions(-)
diff --git a/sound/pci/ctxfi/ctvmem.c b/sound/pci/ctxfi/ctvmem.c
index cecf77e3ee86..b7f8e58ae07d 100644
--- a/sound/pci/ctxfi/ctvmem.c
+++ b/sound/pci/ctxfi/ctvmem.c
@@ -18,12 +18,11 @@
 #include "ctvmem.h"
 #include <linux/slab.h>
 #include <linux/mm.h>
-#include <asm/page.h>	/* for PAGE_SIZE macro definition */
 #include <linux/io.h>
-#include <asm/pgtable.h>
+#include <sound/pcm.h>
 
-#define CT_PTES_PER_PAGE (PAGE_SIZE / sizeof(void *))
-#define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * PAGE_SIZE)
+#define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
+#define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)
 
 /* *
  * Find or create vm block based on requested @size.
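
Note: the PAGE_SIZE -> CT_PAGE_SIZE switch above decouples the driver's page-table arithmetic from the host page size; the card walks its own page tables in fixed-size pages regardless of the CPU's PAGE_SIZE. The CT_* helpers referenced here come from ctvmem.h, which is not part of this file-limited diff; a minimal sketch of what this hunk assumes they look like (the 4 KB value and exact spellings are assumptions, not taken from this diff):

/* Sketch of the CT_* page helpers assumed by the hunk above; the real
 * definitions live in ctvmem.h.  ALIGN() comes from <linux/kernel.h>. */
#define CT_PAGE_SIZE	4096
#define CT_PAGE_SHIFT	12
#define CT_PAGE_ALIGN(addr)	ALIGN((addr), CT_PAGE_SIZE)
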
@@ -35,25 +34,34 @@ get_vm_block(struct ct_vm *vm, unsigned int size)
 	struct ct_vm_block *block = NULL, *entry = NULL;
 	struct list_head *pos = NULL;
 
+	size = CT_PAGE_ALIGN(size);
+	if (size > vm->size) {
+		printk(KERN_ERR "ctxfi: Fail! No sufficient device virtural "
+				  "memory space available!\n");
+		return NULL;
+	}
+
+	mutex_lock(&vm->lock);
 	list_for_each(pos, &vm->unused) {
 		entry = list_entry(pos, struct ct_vm_block, list);
 		if (entry->size >= size)
 			break; /* found a block that is big enough */
 	}
 	if (pos == &vm->unused)
-		return NULL;
+		goto out;
 
 	if (entry->size == size) {
 		/* Move the vm node from unused list to used list directly */
 		list_del(&entry->list);
 		list_add(&entry->list, &vm->used);
 		vm->size -= size;
-		return entry;
+		block = entry;
+		goto out;
 	}
 
 	block = kzalloc(sizeof(*block), GFP_KERNEL);
 	if (NULL == block)
-		return NULL;
+		goto out;
 
 	block->addr = entry->addr;
 	block->size = size;
@@ -62,6 +70,8 @@ get_vm_block(struct ct_vm *vm, unsigned int size)
 	entry->size -= size;
 	vm->size -= size;
 
+ out:
+	mutex_unlock(&vm->lock);
 	return block;
 }
 
@@ -70,6 +80,9 @@ static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
 	struct ct_vm_block *entry = NULL, *pre_ent = NULL;
 	struct list_head *pos = NULL, *pre = NULL;
 
+	block->size = CT_PAGE_ALIGN(block->size);
+
+	mutex_lock(&vm->lock);
 	list_del(&block->list);
 	vm->size += block->size;
 
@@ -106,61 +119,41 @@ static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
 		pos = pre;
 		pre = pos->prev;
 	}
+	mutex_unlock(&vm->lock);
 }
 
 /* Map host addr (kmalloced/vmalloced) to device logical addr. */
 static struct ct_vm_block *
-ct_vm_map(struct ct_vm *vm, void *host_addr, int size)
+ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
 {
-	struct ct_vm_block *block = NULL;
-	unsigned long pte_start;
-	unsigned long i;
-	unsigned long pages;
-	unsigned long start_phys;
+	struct ct_vm_block *block;
+	unsigned int pte_start;
+	unsigned i, pages;
 	unsigned long *ptp;
 
-	/* do mapping */
-	if ((unsigned long)host_addr >= VMALLOC_START) {
-		printk(KERN_ERR "ctxfi: "
-		       "Fail! Not support vmalloced addr now!\n");
-		return NULL;
-	}
-
-	if (size > vm->size) {
-		printk(KERN_ERR "ctxfi: Fail! No sufficient device virtural "
-				  "memory space available!\n");
-		return NULL;
-	}
-
-	start_phys = (virt_to_phys(host_addr) & PAGE_MASK);
-	pages = (PAGE_ALIGN(virt_to_phys(host_addr) + size)
-			- start_phys) >> PAGE_SHIFT;
-
-	ptp = vm->ptp[0];
-
-	block = get_vm_block(vm, (pages << PAGE_SHIFT));
+	block = get_vm_block(vm, size);
 	if (block == NULL) {
 		printk(KERN_ERR "ctxfi: No virtual memory block that is big "
 				  "enough to allocate!\n");
 		return NULL;
 	}
 
-	pte_start = (block->addr >> PAGE_SHIFT);
-	for (i = 0; i < pages; i++)
-		ptp[pte_start+i] = start_phys + (i << PAGE_SHIFT);
+	ptp = vm->ptp[0];
+	pte_start = (block->addr >> CT_PAGE_SHIFT);
+	pages = block->size >> CT_PAGE_SHIFT;
+	for (i = 0; i < pages; i++) {
+		unsigned long addr;
+		addr = snd_pcm_sgbuf_get_addr(substream, i << CT_PAGE_SHIFT);
+		ptp[pte_start + i] = addr;
+	}
 
-	block->addr += (virt_to_phys(host_addr) & (~PAGE_MASK));
 	block->size = size;
-
 	return block;
 }
 
 static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
 {
	/* do unmapping */
-	block->size = ((block->addr + block->size + PAGE_SIZE - 1)
-		       & PAGE_MASK) - (block->addr & PAGE_MASK);
-	block->addr &= PAGE_MASK;
 	put_vm_block(vm, block);
 }
 
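Note: with the virt_to_phys()/host_addr path removed, ct_vm_map() now fills the page table straight from the PCM substream's scatter-gather buffer: snd_pcm_sgbuf_get_addr() returns the DMA address of the buffer page holding the given byte offset, so the audio buffer no longer needs to be physically contiguous. A hypothetical caller inside this file might look roughly like the sketch below; example_map_substream and its use of snd_pcm_lib_buffer_bytes() are illustrative, not part of the patch or the driver's actual call site.

/* Hypothetical helper, not part of the patch: map a substream's whole
 * runtime buffer into the card's address space.  get_vm_block() rounds
 * the requested size up to CT_PAGE_SIZE, so any partial tail page is
 * covered as well. */
static struct ct_vm_block *
example_map_substream(struct ct_vm *vm, struct snd_pcm_substream *substream)
{
	return ct_vm_map(vm, substream,
			 snd_pcm_lib_buffer_bytes(substream));
}
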
@@ -191,6 +184,8 @@ int ct_vm_create(struct ct_vm **rvm)
 	if (NULL == vm)
 		return -ENOMEM;
 
+	mutex_init(&vm->lock);
+
 	/* Allocate page table pages */
 	for (i = 0; i < CT_PTP_NUM; i++) {
 		vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL);
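
Note: the mutex_lock()/mutex_unlock() pairs added to get_vm_block() and put_vm_block(), together with this mutex_init(), imply that struct ct_vm gains a lock member in ctvmem.h (that hunk is outside this file-limited diff). A rough, hedged sketch of the fields the new locking relies on; the field set and names beyond what this diff shows are assumptions, and only the members the locking touches are listed.

#include <linux/list.h>
#include <linux/mutex.h>

/* Stand-in for the real struct ct_vm in ctvmem.h (partial, assumed). */
struct ct_vm_example {
	struct list_head unused;	/* free device-address blocks */
	struct list_head used;		/* handed-out device-address blocks */
	unsigned int size;		/* bytes of device address space left */
	struct mutex lock;		/* serializes list walks and size updates */
};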