diff options
author | Takashi Iwai <tiwai@suse.de> | 2005-11-17 08:50:13 -0500 |
---|---|---|
committer | Jaroslav Kysela <perex@suse.cz> | 2006-01-03 06:18:47 -0500 |
commit | eb4698f347ec908c365504c4edddadd1acd406ea (patch) | |
tree | 4962019c8bf4a52e35ab55137e17aa150edf9661 /sound/pci/emu10k1/memory.c | |
parent | 3d19f804ef5f1d15fe001fc8d1ed58fac9d591fb (diff) |
[ALSA] Remove xxx_t typedefs: PCI emu10k1
Modules: EMU10K1/EMU10K2 driver
Remove xxx_t typedefs from the PCI emu10k1 driver.
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Diffstat (limited to 'sound/pci/emu10k1/memory.c')
-rw-r--r-- | sound/pci/emu10k1/memory.c | 94 |
1 file changed, 49 insertions, 45 deletions
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c index d42e4aeaa73a..68c795c03109 100644 --- a/sound/pci/emu10k1/memory.c +++ b/sound/pci/emu10k1/memory.c | |||
@@ -48,7 +48,7 @@ | |||
48 | #define set_silent_ptb(emu,page) __set_ptb_entry(emu,page,emu->silent_page.addr) | 48 | #define set_silent_ptb(emu,page) __set_ptb_entry(emu,page,emu->silent_page.addr) |
49 | #else | 49 | #else |
50 | /* fill PTB entries -- we need to fill UNIT_PAGES entries */ | 50 | /* fill PTB entries -- we need to fill UNIT_PAGES entries */ |
51 | static inline void set_ptb_entry(emu10k1_t *emu, int page, dma_addr_t addr) | 51 | static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr) |
52 | { | 52 | { |
53 | int i; | 53 | int i; |
54 | page *= UNIT_PAGES; | 54 | page *= UNIT_PAGES; |
@@ -57,7 +57,7 @@ static inline void set_ptb_entry(emu10k1_t *emu, int page, dma_addr_t addr) | |||
57 | addr += EMUPAGESIZE; | 57 | addr += EMUPAGESIZE; |
58 | } | 58 | } |
59 | } | 59 | } |
60 | static inline void set_silent_ptb(emu10k1_t *emu, int page) | 60 | static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page) |
61 | { | 61 | { |
62 | int i; | 62 | int i; |
63 | page *= UNIT_PAGES; | 63 | page *= UNIT_PAGES; |
@@ -70,14 +70,14 @@ static inline void set_silent_ptb(emu10k1_t *emu, int page) | |||
70 | 70 | ||
71 | /* | 71 | /* |
72 | */ | 72 | */ |
73 | static int synth_alloc_pages(emu10k1_t *hw, emu10k1_memblk_t *blk); | 73 | static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk); |
74 | static int synth_free_pages(emu10k1_t *hw, emu10k1_memblk_t *blk); | 74 | static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk); |
75 | 75 | ||
76 | #define get_emu10k1_memblk(l,member) list_entry(l, emu10k1_memblk_t, member) | 76 | #define get_emu10k1_memblk(l,member) list_entry(l, struct snd_emu10k1_memblk, member) |
77 | 77 | ||
78 | 78 | ||
79 | /* initialize emu10k1 part */ | 79 | /* initialize emu10k1 part */ |
80 | static void emu10k1_memblk_init(emu10k1_memblk_t *blk) | 80 | static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk) |
81 | { | 81 | { |
82 | blk->mapped_page = -1; | 82 | blk->mapped_page = -1; |
83 | INIT_LIST_HEAD(&blk->mapped_link); | 83 | INIT_LIST_HEAD(&blk->mapped_link); |
@@ -96,7 +96,7 @@ static void emu10k1_memblk_init(emu10k1_memblk_t *blk) | |||
96 | * in nextp | 96 | * in nextp |
97 | * if not found, return a negative error code. | 97 | * if not found, return a negative error code. |
98 | */ | 98 | */ |
99 | static int search_empty_map_area(emu10k1_t *emu, int npages, struct list_head **nextp) | 99 | static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp) |
100 | { | 100 | { |
101 | int page = 0, found_page = -ENOMEM; | 101 | int page = 0, found_page = -ENOMEM; |
102 | int max_size = npages; | 102 | int max_size = npages; |
@@ -105,7 +105,7 @@ static int search_empty_map_area(emu10k1_t *emu, int npages, struct list_head ** | |||
105 | struct list_head *pos; | 105 | struct list_head *pos; |
106 | 106 | ||
107 | list_for_each (pos, &emu->mapped_link_head) { | 107 | list_for_each (pos, &emu->mapped_link_head) { |
108 | emu10k1_memblk_t *blk = get_emu10k1_memblk(pos, mapped_link); | 108 | struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link); |
109 | snd_assert(blk->mapped_page >= 0, continue); | 109 | snd_assert(blk->mapped_page >= 0, continue); |
110 | size = blk->mapped_page - page; | 110 | size = blk->mapped_page - page; |
111 | if (size == npages) { | 111 | if (size == npages) { |
@@ -134,7 +134,7 @@ static int search_empty_map_area(emu10k1_t *emu, int npages, struct list_head ** | |||
134 | * | 134 | * |
135 | * call with memblk_lock held | 135 | * call with memblk_lock held |
136 | */ | 136 | */ |
137 | static int map_memblk(emu10k1_t *emu, emu10k1_memblk_t *blk) | 137 | static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) |
138 | { | 138 | { |
139 | int page, pg; | 139 | int page, pg; |
140 | struct list_head *next; | 140 | struct list_head *next; |
@@ -161,11 +161,11 @@ static int map_memblk(emu10k1_t *emu, emu10k1_memblk_t *blk) | |||
161 | * | 161 | * |
162 | * call with memblk_lock held | 162 | * call with memblk_lock held |
163 | */ | 163 | */ |
164 | static int unmap_memblk(emu10k1_t *emu, emu10k1_memblk_t *blk) | 164 | static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) |
165 | { | 165 | { |
166 | int start_page, end_page, mpage, pg; | 166 | int start_page, end_page, mpage, pg; |
167 | struct list_head *p; | 167 | struct list_head *p; |
168 | emu10k1_memblk_t *q; | 168 | struct snd_emu10k1_memblk *q; |
169 | 169 | ||
170 | /* calculate the expected size of empty region */ | 170 | /* calculate the expected size of empty region */ |
171 | if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) { | 171 | if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) { |
@@ -197,11 +197,11 @@ static int unmap_memblk(emu10k1_t *emu, emu10k1_memblk_t *blk) | |||
197 | * | 197 | * |
198 | * unlike synth_alloc the memory block is aligned to the page start | 198 | * unlike synth_alloc the memory block is aligned to the page start |
199 | */ | 199 | */ |
200 | static emu10k1_memblk_t * | 200 | static struct snd_emu10k1_memblk * |
201 | search_empty(emu10k1_t *emu, int size) | 201 | search_empty(struct snd_emu10k1 *emu, int size) |
202 | { | 202 | { |
203 | struct list_head *p; | 203 | struct list_head *p; |
204 | emu10k1_memblk_t *blk; | 204 | struct snd_emu10k1_memblk *blk; |
205 | int page, psize; | 205 | int page, psize; |
206 | 206 | ||
207 | psize = get_aligned_page(size + PAGE_SIZE -1); | 207 | psize = get_aligned_page(size + PAGE_SIZE -1); |
@@ -217,7 +217,7 @@ search_empty(emu10k1_t *emu, int size) | |||
217 | 217 | ||
218 | __found_pages: | 218 | __found_pages: |
219 | /* create a new memory block */ | 219 | /* create a new memory block */ |
220 | blk = (emu10k1_memblk_t *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev); | 220 | blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev); |
221 | if (blk == NULL) | 221 | if (blk == NULL) |
222 | return NULL; | 222 | return NULL; |
223 | blk->mem.offset = aligned_page_offset(page); /* set aligned offset */ | 223 | blk->mem.offset = aligned_page_offset(page); /* set aligned offset */ |
@@ -229,7 +229,7 @@ __found_pages: | |||
229 | /* | 229 | /* |
230 | * check if the given pointer is valid for pages | 230 | * check if the given pointer is valid for pages |
231 | */ | 231 | */ |
232 | static int is_valid_page(emu10k1_t *emu, dma_addr_t addr) | 232 | static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr) |
233 | { | 233 | { |
234 | if (addr & ~emu->dma_mask) { | 234 | if (addr & ~emu->dma_mask) { |
235 | snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n", emu->dma_mask, (unsigned long)addr); | 235 | snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n", emu->dma_mask, (unsigned long)addr); |
@@ -248,12 +248,12 @@ static int is_valid_page(emu10k1_t *emu, dma_addr_t addr) | |||
248 | * if no empty pages are found, tries to release unused memory blocks | 248 | * if no empty pages are found, tries to release unused memory blocks |
249 | * and retry the mapping. | 249 | * and retry the mapping. |
250 | */ | 250 | */ |
251 | int snd_emu10k1_memblk_map(emu10k1_t *emu, emu10k1_memblk_t *blk) | 251 | int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) |
252 | { | 252 | { |
253 | int err; | 253 | int err; |
254 | int size; | 254 | int size; |
255 | struct list_head *p, *nextp; | 255 | struct list_head *p, *nextp; |
256 | emu10k1_memblk_t *deleted; | 256 | struct snd_emu10k1_memblk *deleted; |
257 | unsigned long flags; | 257 | unsigned long flags; |
258 | 258 | ||
259 | spin_lock_irqsave(&emu->memblk_lock, flags); | 259 | spin_lock_irqsave(&emu->memblk_lock, flags); |
@@ -288,13 +288,13 @@ int snd_emu10k1_memblk_map(emu10k1_t *emu, emu10k1_memblk_t *blk) | |||
288 | /* | 288 | /* |
289 | * page allocation for DMA | 289 | * page allocation for DMA |
290 | */ | 290 | */ |
291 | snd_util_memblk_t * | 291 | struct snd_util_memblk * |
292 | snd_emu10k1_alloc_pages(emu10k1_t *emu, snd_pcm_substream_t *substream) | 292 | snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream) |
293 | { | 293 | { |
294 | snd_pcm_runtime_t *runtime = substream->runtime; | 294 | struct snd_pcm_runtime *runtime = substream->runtime; |
295 | struct snd_sg_buf *sgbuf = snd_pcm_substream_sgbuf(substream); | 295 | struct snd_sg_buf *sgbuf = snd_pcm_substream_sgbuf(substream); |
296 | snd_util_memhdr_t *hdr; | 296 | struct snd_util_memhdr *hdr; |
297 | emu10k1_memblk_t *blk; | 297 | struct snd_emu10k1_memblk *blk; |
298 | int page, err, idx; | 298 | int page, err, idx; |
299 | 299 | ||
300 | snd_assert(emu, return NULL); | 300 | snd_assert(emu, return NULL); |
@@ -336,19 +336,19 @@ snd_emu10k1_alloc_pages(emu10k1_t *emu, snd_pcm_substream_t *substream) | |||
336 | blk->map_locked = 1; /* do not unmap this block! */ | 336 | blk->map_locked = 1; /* do not unmap this block! */ |
337 | err = snd_emu10k1_memblk_map(emu, blk); | 337 | err = snd_emu10k1_memblk_map(emu, blk); |
338 | if (err < 0) { | 338 | if (err < 0) { |
339 | __snd_util_mem_free(hdr, (snd_util_memblk_t *)blk); | 339 | __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk); |
340 | up(&hdr->block_mutex); | 340 | up(&hdr->block_mutex); |
341 | return NULL; | 341 | return NULL; |
342 | } | 342 | } |
343 | up(&hdr->block_mutex); | 343 | up(&hdr->block_mutex); |
344 | return (snd_util_memblk_t *)blk; | 344 | return (struct snd_util_memblk *)blk; |
345 | } | 345 | } |
346 | 346 | ||
347 | 347 | ||
348 | /* | 348 | /* |
349 | * release DMA buffer from page table | 349 | * release DMA buffer from page table |
350 | */ | 350 | */ |
351 | int snd_emu10k1_free_pages(emu10k1_t *emu, snd_util_memblk_t *blk) | 351 | int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk) |
352 | { | 352 | { |
353 | snd_assert(emu && blk, return -EINVAL); | 353 | snd_assert(emu && blk, return -EINVAL); |
354 | return snd_emu10k1_synth_free(emu, blk); | 354 | return snd_emu10k1_synth_free(emu, blk); |
@@ -363,26 +363,26 @@ int snd_emu10k1_free_pages(emu10k1_t *emu, snd_util_memblk_t *blk) | |||
363 | /* | 363 | /* |
364 | * allocate a synth sample area | 364 | * allocate a synth sample area |
365 | */ | 365 | */ |
366 | snd_util_memblk_t * | 366 | struct snd_util_memblk * |
367 | snd_emu10k1_synth_alloc(emu10k1_t *hw, unsigned int size) | 367 | snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size) |
368 | { | 368 | { |
369 | emu10k1_memblk_t *blk; | 369 | struct snd_emu10k1_memblk *blk; |
370 | snd_util_memhdr_t *hdr = hw->memhdr; | 370 | struct snd_util_memhdr *hdr = hw->memhdr; |
371 | 371 | ||
372 | down(&hdr->block_mutex); | 372 | down(&hdr->block_mutex); |
373 | blk = (emu10k1_memblk_t *)__snd_util_mem_alloc(hdr, size); | 373 | blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size); |
374 | if (blk == NULL) { | 374 | if (blk == NULL) { |
375 | up(&hdr->block_mutex); | 375 | up(&hdr->block_mutex); |
376 | return NULL; | 376 | return NULL; |
377 | } | 377 | } |
378 | if (synth_alloc_pages(hw, blk)) { | 378 | if (synth_alloc_pages(hw, blk)) { |
379 | __snd_util_mem_free(hdr, (snd_util_memblk_t *)blk); | 379 | __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk); |
380 | up(&hdr->block_mutex); | 380 | up(&hdr->block_mutex); |
381 | return NULL; | 381 | return NULL; |
382 | } | 382 | } |
383 | snd_emu10k1_memblk_map(hw, blk); | 383 | snd_emu10k1_memblk_map(hw, blk); |
384 | up(&hdr->block_mutex); | 384 | up(&hdr->block_mutex); |
385 | return (snd_util_memblk_t *)blk; | 385 | return (struct snd_util_memblk *)blk; |
386 | } | 386 | } |
387 | 387 | ||
388 | 388 | ||
@@ -390,10 +390,10 @@ snd_emu10k1_synth_alloc(emu10k1_t *hw, unsigned int size) | |||
390 | * free a synth sample area | 390 | * free a synth sample area |
391 | */ | 391 | */ |
392 | int | 392 | int |
393 | snd_emu10k1_synth_free(emu10k1_t *emu, snd_util_memblk_t *memblk) | 393 | snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk) |
394 | { | 394 | { |
395 | snd_util_memhdr_t *hdr = emu->memhdr; | 395 | struct snd_util_memhdr *hdr = emu->memhdr; |
396 | emu10k1_memblk_t *blk = (emu10k1_memblk_t *)memblk; | 396 | struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk; |
397 | unsigned long flags; | 397 | unsigned long flags; |
398 | 398 | ||
399 | down(&hdr->block_mutex); | 399 | down(&hdr->block_mutex); |
@@ -409,10 +409,12 @@ snd_emu10k1_synth_free(emu10k1_t *emu, snd_util_memblk_t *memblk) | |||
409 | 409 | ||
410 | 410 | ||
411 | /* check new allocation range */ | 411 | /* check new allocation range */ |
412 | static void get_single_page_range(snd_util_memhdr_t *hdr, emu10k1_memblk_t *blk, int *first_page_ret, int *last_page_ret) | 412 | static void get_single_page_range(struct snd_util_memhdr *hdr, |
413 | struct snd_emu10k1_memblk *blk, | ||
414 | int *first_page_ret, int *last_page_ret) | ||
413 | { | 415 | { |
414 | struct list_head *p; | 416 | struct list_head *p; |
415 | emu10k1_memblk_t *q; | 417 | struct snd_emu10k1_memblk *q; |
416 | int first_page, last_page; | 418 | int first_page, last_page; |
417 | first_page = blk->first_page; | 419 | first_page = blk->first_page; |
418 | if ((p = blk->mem.list.prev) != &hdr->block) { | 420 | if ((p = blk->mem.list.prev) != &hdr->block) { |
@@ -433,7 +435,7 @@ static void get_single_page_range(snd_util_memhdr_t *hdr, emu10k1_memblk_t *blk, | |||
433 | /* | 435 | /* |
434 | * allocate kernel pages | 436 | * allocate kernel pages |
435 | */ | 437 | */ |
436 | static int synth_alloc_pages(emu10k1_t *emu, emu10k1_memblk_t *blk) | 438 | static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) |
437 | { | 439 | { |
438 | int page, first_page, last_page; | 440 | int page, first_page, last_page; |
439 | struct snd_dma_buffer dmab; | 441 | struct snd_dma_buffer dmab; |
@@ -472,7 +474,7 @@ __fail: | |||
472 | /* | 474 | /* |
473 | * free pages | 475 | * free pages |
474 | */ | 476 | */ |
475 | static int synth_free_pages(emu10k1_t *emu, emu10k1_memblk_t *blk) | 477 | static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) |
476 | { | 478 | { |
477 | int page, first_page, last_page; | 479 | int page, first_page, last_page; |
478 | struct snd_dma_buffer dmab; | 480 | struct snd_dma_buffer dmab; |
@@ -495,7 +497,7 @@ static int synth_free_pages(emu10k1_t *emu, emu10k1_memblk_t *blk) | |||
495 | } | 497 | } |
496 | 498 | ||
497 | /* calculate buffer pointer from offset address */ | 499 | /* calculate buffer pointer from offset address */ |
498 | static inline void *offset_ptr(emu10k1_t *emu, int page, int offset) | 500 | static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset) |
499 | { | 501 | { |
500 | char *ptr; | 502 | char *ptr; |
501 | snd_assert(page >= 0 && page < emu->max_cache_pages, return NULL); | 503 | snd_assert(page >= 0 && page < emu->max_cache_pages, return NULL); |
@@ -511,11 +513,12 @@ static inline void *offset_ptr(emu10k1_t *emu, int page, int offset) | |||
511 | /* | 513 | /* |
512 | * bzero(blk + offset, size) | 514 | * bzero(blk + offset, size) |
513 | */ | 515 | */ |
514 | int snd_emu10k1_synth_bzero(emu10k1_t *emu, snd_util_memblk_t *blk, int offset, int size) | 516 | int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk, |
517 | int offset, int size) | ||
515 | { | 518 | { |
516 | int page, nextofs, end_offset, temp, temp1; | 519 | int page, nextofs, end_offset, temp, temp1; |
517 | void *ptr; | 520 | void *ptr; |
518 | emu10k1_memblk_t *p = (emu10k1_memblk_t *)blk; | 521 | struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk; |
519 | 522 | ||
520 | offset += blk->offset & (PAGE_SIZE - 1); | 523 | offset += blk->offset & (PAGE_SIZE - 1); |
521 | end_offset = offset + size; | 524 | end_offset = offset + size; |
@@ -538,11 +541,12 @@ int snd_emu10k1_synth_bzero(emu10k1_t *emu, snd_util_memblk_t *blk, int offset, | |||
538 | /* | 541 | /* |
539 | * copy_from_user(blk + offset, data, size) | 542 | * copy_from_user(blk + offset, data, size) |
540 | */ | 543 | */ |
541 | int snd_emu10k1_synth_copy_from_user(emu10k1_t *emu, snd_util_memblk_t *blk, int offset, const char __user *data, int size) | 544 | int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk, |
545 | int offset, const char __user *data, int size) | ||
542 | { | 546 | { |
543 | int page, nextofs, end_offset, temp, temp1; | 547 | int page, nextofs, end_offset, temp, temp1; |
544 | void *ptr; | 548 | void *ptr; |
545 | emu10k1_memblk_t *p = (emu10k1_memblk_t *)blk; | 549 | struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk; |
546 | 550 | ||
547 | offset += blk->offset & (PAGE_SIZE - 1); | 551 | offset += blk->offset & (PAGE_SIZE - 1); |
548 | end_offset = offset + size; | 552 | end_offset = offset + size; |