author	Takashi Iwai <tiwai@suse.de>	2008-05-30 03:47:45 -0400
committer	Takashi Iwai <tiwai@suse.de>	2008-05-30 03:48:33 -0400
commit	8bb8b453cb458d8f62411e78a4cfd6d860b503b6 (patch)
tree	6a4fec23d22bcdda779a6f961d092dd4d6e084f1
parent	23ce1547638443f0053dd674e728062c48ff0e39 (diff)
[ALSA] trident - clean up obsolete synth codes
Clean up the unused synth code in the memory handling of the trident driver.

Signed-off-by: Takashi Iwai <tiwai@suse.de>
-rw-r--r--	sound/pci/trident/trident_memory.c	178
1 file changed, 0 insertions, 178 deletions
diff --git a/sound/pci/trident/trident_memory.c b/sound/pci/trident/trident_memory.c
index df9b487fa17e..3fd7f1b29b0f 100644
--- a/sound/pci/trident/trident_memory.c
+++ b/sound/pci/trident/trident_memory.c
@@ -310,181 +310,3 @@ int snd_trident_free_pages(struct snd_trident *trident,
         mutex_unlock(&hdr->block_mutex);
         return 0;
 }
-
-
-/*----------------------------------------------------------------
- * memory allocation using multiple pages (for synth)
- *----------------------------------------------------------------
- * Unlike the DMA allocation above, non-contiguous pages are
- * assigned to TLB.
- *----------------------------------------------------------------*/
-
-/*
- */
-static int synth_alloc_pages(struct snd_trident *hw, struct snd_util_memblk *blk);
-static int synth_free_pages(struct snd_trident *hw, struct snd_util_memblk *blk);
-
-/*
- * allocate a synth sample area
- */
-struct snd_util_memblk *
-snd_trident_synth_alloc(struct snd_trident *hw, unsigned int size)
-{
-        struct snd_util_memblk *blk;
-        struct snd_util_memhdr *hdr = hw->tlb.memhdr;
-
-        mutex_lock(&hdr->block_mutex);
-        blk = __snd_util_mem_alloc(hdr, size);
-        if (blk == NULL) {
-                mutex_unlock(&hdr->block_mutex);
-                return NULL;
-        }
-        if (synth_alloc_pages(hw, blk)) {
-                __snd_util_mem_free(hdr, blk);
-                mutex_unlock(&hdr->block_mutex);
-                return NULL;
-        }
-        mutex_unlock(&hdr->block_mutex);
-        return blk;
-}
-
-EXPORT_SYMBOL(snd_trident_synth_alloc);
-
-/*
- * free a synth sample area
- */
-int
-snd_trident_synth_free(struct snd_trident *hw, struct snd_util_memblk *blk)
-{
-        struct snd_util_memhdr *hdr = hw->tlb.memhdr;
-
-        mutex_lock(&hdr->block_mutex);
-        synth_free_pages(hw, blk);
-        __snd_util_mem_free(hdr, blk);
-        mutex_unlock(&hdr->block_mutex);
-        return 0;
-}
-
-EXPORT_SYMBOL(snd_trident_synth_free);
-
-/*
- * reset TLB entry and free kernel page
- */
-static void clear_tlb(struct snd_trident *trident, int page)
-{
-        void *ptr = page_to_ptr(trident, page);
-        dma_addr_t addr = page_to_addr(trident, page);
-        set_silent_tlb(trident, page);
-        if (ptr) {
-                struct snd_dma_buffer dmab;
-                dmab.dev.type = SNDRV_DMA_TYPE_DEV;
-                dmab.dev.dev = snd_dma_pci_data(trident->pci);
-                dmab.area = ptr;
-                dmab.addr = addr;
-                dmab.bytes = ALIGN_PAGE_SIZE;
-                snd_dma_free_pages(&dmab);
-        }
-}
-
-/* check new allocation range */
-static void get_single_page_range(struct snd_util_memhdr *hdr,
-                                  struct snd_util_memblk *blk,
-                                  int *first_page_ret, int *last_page_ret)
-{
-        struct list_head *p;
-        struct snd_util_memblk *q;
-        int first_page, last_page;
-        first_page = firstpg(blk);
-        if ((p = blk->list.prev) != &hdr->block) {
-                q = list_entry(p, struct snd_util_memblk, list);
-                if (lastpg(q) == first_page)
-                        first_page++;   /* first page was already allocated */
-        }
-        last_page = lastpg(blk);
-        if ((p = blk->list.next) != &hdr->block) {
-                q = list_entry(p, struct snd_util_memblk, list);
-                if (firstpg(q) == last_page)
-                        last_page--;    /* last page was already allocated */
-        }
-        *first_page_ret = first_page;
-        *last_page_ret = last_page;
-}
-
-/*
- * allocate kernel pages and assign them to TLB
- */
-static int synth_alloc_pages(struct snd_trident *hw, struct snd_util_memblk *blk)
-{
-        int page, first_page, last_page;
-        struct snd_dma_buffer dmab;
-
-        firstpg(blk) = get_aligned_page(blk->offset);
-        lastpg(blk) = get_aligned_page(blk->offset + blk->size - 1);
-        get_single_page_range(hw->tlb.memhdr, blk, &first_page, &last_page);
-
-        /* allocate a kernel page for each Trident page -
-         * fortunately Trident page size and kernel PAGE_SIZE is identical!
-         */
-        for (page = first_page; page <= last_page; page++) {
-                if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(hw->pci),
-                                        ALIGN_PAGE_SIZE, &dmab) < 0)
-                        goto __fail;
-                if (! is_valid_page(dmab.addr)) {
-                        snd_dma_free_pages(&dmab);
-                        goto __fail;
-                }
-                set_tlb_bus(hw, page, (unsigned long)dmab.area, dmab.addr);
-        }
-        return 0;
-
-__fail:
-        /* release allocated pages */
-        last_page = page - 1;
-        for (page = first_page; page <= last_page; page++)
-                clear_tlb(hw, page);
-
-        return -ENOMEM;
-}
-
-/*
- * free pages
- */
-static int synth_free_pages(struct snd_trident *trident, struct snd_util_memblk *blk)
-{
-        int page, first_page, last_page;
-
-        get_single_page_range(trident->tlb.memhdr, blk, &first_page, &last_page);
-        for (page = first_page; page <= last_page; page++)
-                clear_tlb(trident, page);
-
-        return 0;
-}
-
-/*
- * copy_from_user(blk + offset, data, size)
- */
-int snd_trident_synth_copy_from_user(struct snd_trident *trident,
-                                     struct snd_util_memblk *blk,
-                                     int offset, const char __user *data, int size)
-{
-        int page, nextofs, end_offset, temp, temp1;
-
-        offset += blk->offset;
-        end_offset = offset + size;
-        page = get_aligned_page(offset) + 1;
-        do {
-                nextofs = aligned_page_offset(page);
-                temp = nextofs - offset;
-                temp1 = end_offset - offset;
-                if (temp1 < temp)
-                        temp = temp1;
-                if (copy_from_user(offset_ptr(trident, offset), data, temp))
-                        return -EFAULT;
-                offset = nextofs;
-                data += temp;
-                page++;
-        } while (offset < end_offset);
-        return 0;
-}
-
-EXPORT_SYMBOL(snd_trident_synth_copy_from_user);
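For context, the removed exports (snd_trident_synth_alloc(), snd_trident_synth_copy_from_user(), snd_trident_synth_free()) formed a synth sample-loading API that no in-tree code called any longer, which is why the whole block could be dropped. A hypothetical caller would have used them roughly as in the sketch below; load_sample() and its error handling are illustrative only, not part of the driver:

        /* Illustrative sketch only -- load_sample() is hypothetical.
         * It shows the alloc/copy/free pattern of the removed API.
         */
        static int load_sample(struct snd_trident *hw,
                               const char __user *data, int size)
        {
                struct snd_util_memblk *blk;

                /* reserve a block; pages are mapped through the chip's TLB */
                blk = snd_trident_synth_alloc(hw, size);
                if (!blk)
                        return -ENOMEM;

                /* copy the sample data from user space, page by page */
                if (snd_trident_synth_copy_from_user(hw, blk, 0, data, size)) {
                        snd_trident_synth_free(hw, blk);
                        return -EFAULT;
                }
                return 0;
        }

The page-by-page loop in snd_trident_synth_copy_from_user() reflects the design noted in the removed comments: the allocator maps non-contiguous kernel pages into the TLB, so a block is not virtually contiguous on the CPU side and a single copy_from_user() over the whole range would not work.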