author		Dan Williams <dan.j.williams@intel.com>	2009-04-08 17:28:13 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2009-04-08 17:28:13 -0400
commit		fd74ea65883c7e6903e9b652795f72b723a2be69 (patch)
tree		0792ad598080eae201d2836ac3c5a8fc46d0d03e /drivers/dma/dmaengine.c
parent		c8f517c444e4f9f55b5b5ca202b8404691a35805 (diff)
parent		8c6db1bbf80123839ec87bdd6cb364aea384623d (diff)
Merge branch 'dmaengine' into async-tx-raid6
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--	drivers/dma/dmaengine.c | 60
1 file changed, 48 insertions(+), 12 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 280a9d263eb3..92438e9dacc3 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -507,6 +507,7 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 			 * published in the general-purpose allocator
 			 */
 			dma_cap_set(DMA_PRIVATE, device->cap_mask);
+			device->privatecnt++;
 			err = dma_chan_get(chan);
 
 			if (err == -ENODEV) {
@@ -518,6 +519,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 				       dma_chan_name(chan), err);
 			else
 				break;
+			if (--device->privatecnt == 0)
+				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 			chan->private = NULL;
 			chan = NULL;
 		}
@@ -537,6 +540,9 @@ void dma_release_channel(struct dma_chan *chan)
 	WARN_ONCE(chan->client_count != 1,
 		  "chan reference count %d != 1\n", chan->client_count);
 	dma_chan_put(chan);
+	/* drop PRIVATE cap enabled by __dma_request_channel() */
+	if (--chan->device->privatecnt == 0)
+		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
 	chan->private = NULL;
 	mutex_unlock(&dma_list_mutex);
 }
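Taken together, the hunks above form a pair: __dma_request_channel() bumps device->privatecnt whenever it sets DMA_PRIVATE on a device (and undoes both if the channel cannot be grabbed), while dma_release_channel() drops the count and clears the capability once the last privately-requested channel is returned, letting the device rejoin the general-purpose pool. A minimal client-side sketch of that pairing, assuming the existing dma_request_channel() wrapper; the filter function is invented for illustration:

	/* illustrative only -- demo_filter is not part of this commit */
	static bool demo_filter(struct dma_chan *chan, void *param)
	{
		return true;	/* accept the first candidate channel */
	}

	static void demo_private_channel(void)
	{
		dma_cap_mask_t mask;
		struct dma_chan *chan;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		/* sets DMA_PRIVATE on the device and bumps privatecnt */
		chan = dma_request_channel(mask, demo_filter, NULL);
		if (!chan)
			return;

		/* ... issue transfers ... */

		/* drops privatecnt; clears DMA_PRIVATE when it hits zero */
		dma_release_channel(chan);
	}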
@@ -602,6 +608,24 @@ void dmaengine_put(void)
 }
 EXPORT_SYMBOL(dmaengine_put);
 
+static int get_dma_id(struct dma_device *device)
+{
+	int rc;
+
+ idr_retry:
+	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
+		return -ENOMEM;
+	mutex_lock(&dma_list_mutex);
+	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
+	mutex_unlock(&dma_list_mutex);
+	if (rc == -EAGAIN)
+		goto idr_retry;
+	else if (rc != 0)
+		return rc;
+
+	return 0;
+}
+
 /**
  * dma_async_device_register - registers DMA devices found
  * @device: &dma_device
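The new get_dma_id() helper factors out the two-step idr idiom of this era: idr_pre_get() preallocates memory with GFP_KERNEL outside the lock, idr_get_new() inserts under dma_list_mutex, and -EAGAIN signals that a concurrent allocation consumed the preallocated node, so the loop retries. For comparison only, a sketch of the same helper under the later idr_alloc() API (kernels 3.9+; not part of this commit):

	static int get_dma_id(struct dma_device *device)
	{
		int id;

		mutex_lock(&dma_list_mutex);
		/* idr_alloc() returns the new id, or a negative errno */
		id = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
		mutex_unlock(&dma_list_mutex);
		if (id < 0)
			return id;
		device->dev_id = id;
		return 0;
	}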
@@ -640,27 +664,25 @@ int dma_async_device_register(struct dma_device *device)
 	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
 	if (!idr_ref)
 		return -ENOMEM;
-	atomic_set(idr_ref, 0);
- idr_retry:
-	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
-		return -ENOMEM;
-	mutex_lock(&dma_list_mutex);
-	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
-	mutex_unlock(&dma_list_mutex);
-	if (rc == -EAGAIN)
-		goto idr_retry;
-	else if (rc != 0)
+	rc = get_dma_id(device);
+	if (rc != 0) {
+		kfree(idr_ref);
 		return rc;
+	}
+
+	atomic_set(idr_ref, 0);
 
 	/* represent channels in sysfs. Probably want devs too */
 	list_for_each_entry(chan, &device->channels, device_node) {
+		rc = -ENOMEM;
 		chan->local = alloc_percpu(typeof(*chan->local));
 		if (chan->local == NULL)
-			continue;
+			goto err_out;
 		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
 		if (chan->dev == NULL) {
 			free_percpu(chan->local);
-			continue;
+			chan->local = NULL;
+			goto err_out;
 		}
 
 		chan->chan_id = chancnt++;
@@ -677,6 +699,8 @@ int dma_async_device_register(struct dma_device *device)
 		if (rc) {
 			free_percpu(chan->local);
 			chan->local = NULL;
+			kfree(chan->dev);
+			atomic_dec(idr_ref);
 			goto err_out;
 		}
 		chan->client_count = 0;
@@ -701,12 +725,23 @@ int dma_async_device_register(struct dma_device *device)
 		}
 	}
 	list_add_tail_rcu(&device->global_node, &dma_device_list);
+	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+		device->privatecnt++;	/* Always private */
 	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
 
 	return 0;
 
 err_out:
+	/* if we never registered a channel just release the idr */
+	if (atomic_read(idr_ref) == 0) {
+		mutex_lock(&dma_list_mutex);
+		idr_remove(&dma_idr, device->dev_id);
+		mutex_unlock(&dma_list_mutex);
+		kfree(idr_ref);
+		return rc;
+	}
+
 	list_for_each_entry(chan, &device->channels, device_node) {
 		if (chan->local == NULL)
 			continue;
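The atomic_read(idr_ref) == 0 test in the new err_out prologue leans on the file's existing refcounting: each channel that survives device_register() holds a reference on idr_ref, and the sysfs release callback retires the idr entry when the last reference drops. If no channel ever registered, no release callback will run, so the error path must free the id itself. The release side looks roughly like the following (paraphrased from the same file for context; not part of this diff, details may differ):

	static void chan_dev_release(struct device *dev)
	{
		struct dma_chan_dev *chan_dev;

		chan_dev = container_of(dev, typeof(*chan_dev), device);
		if (atomic_dec_and_test(chan_dev->idr_ref)) {
			/* last channel gone: retire the device's dma id */
			mutex_lock(&dma_list_mutex);
			idr_remove(&dma_idr, chan_dev->dev_id);
			mutex_unlock(&dma_list_mutex);
			kfree(chan_dev->idr_ref);
		}
		kfree(chan_dev);
	}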
@@ -893,6 +928,7 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 {
 	tx->chan = chan;
 	spin_lock_init(&tx->lock);
+	INIT_LIST_HEAD(&tx->tx_list);
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
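With tx_list initialized centrally, every descriptor leaves dma_async_tx_descriptor_init() carrying a valid empty list head, so individual drivers no longer need their own INIT_LIST_HEAD() before chaining descriptors. A hypothetical driver-side sketch (the foo_desc wrapper and allocator are invented for illustration):

	struct foo_desc {
		struct dma_async_tx_descriptor txd;
		/* driver-private state ... */
	};

	static struct foo_desc *foo_alloc_desc(struct dma_chan *chan)
	{
		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);

		if (!d)
			return NULL;
		dma_async_tx_descriptor_init(&d->txd, chan);
		/* d->txd.tx_list is already a valid empty list head;
		 * safe to list_add_tail() onto it with no extra init.
		 */
		return d;
	}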