author     Dan Williams <dan.j.williams@intel.com>   2009-03-25 12:13:23 -0400
committer  Dan Williams <dan.j.williams@intel.com>   2009-03-25 12:13:23 -0400
commit     257b17ca030387cb17314cd1851507bdd1b4ddd5 (patch)
tree       74f88050ecfb70e6370399bc8b34843b22472f85 /drivers/dma/dmaengine.c
parent     041b62374c7fedc11a8a1eeda2868612d3d1436c (diff)
dmaengine: fail device registration if channel registration fails
Atsushi points out:
"If alloc_percpu or kzalloc failed, chan_id does not match with its
position in device->channels list.
And above "continue" looks buggy anyway. Keeping incomplete channels
in device->channels list looks very dangerous..."
Also, fix up leakage of idr_ref in the idr_pre_get() and channel init
fail cases.
Reported-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
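
To make the first problem concrete, here is a minimal annotated sketch of the pre-patch registration loop (condensed from the old code in the second hunk below; the three-channel scenario is only an assumed example):

	list_for_each_entry(chan, &device->channels, device_node) {
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			continue;	/* old code: skip, but chan stays on device->channels */
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			continue;	/* same problem */
		}
		chan->chan_id = chancnt++;	/* never incremented for skipped channels */
		/* device_register() etc. follows in the real function */
	}

If allocation fails for, say, the second of three channels, the third channel gets chan_id 1 while sitting at position 2 of device->channels, and the skipped channel stays on the list half-initialised. The patch turns both "continue" statements into "goto err_out" so registration of the whole device fails instead.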
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--   drivers/dma/dmaengine.c   51
1 file changed, 39 insertions(+), 12 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 280a9d263eb3..49243d14b894 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -602,6 +602,24 @@ void dmaengine_put(void)
 }
 EXPORT_SYMBOL(dmaengine_put);
 
+static int get_dma_id(struct dma_device *device)
+{
+	int rc;
+
+ idr_retry:
+	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
+		return -ENOMEM;
+	mutex_lock(&dma_list_mutex);
+	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
+	mutex_unlock(&dma_list_mutex);
+	if (rc == -EAGAIN)
+		goto idr_retry;
+	else if (rc != 0)
+		return rc;
+
+	return 0;
+}
+
 /**
  * dma_async_device_register - registers DMA devices found
  * @device: &dma_device
@@ -640,27 +658,25 @@ int dma_async_device_register(struct dma_device *device)
 	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
 	if (!idr_ref)
 		return -ENOMEM;
-	atomic_set(idr_ref, 0);
- idr_retry:
-	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
-		return -ENOMEM;
-	mutex_lock(&dma_list_mutex);
-	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
-	mutex_unlock(&dma_list_mutex);
-	if (rc == -EAGAIN)
-		goto idr_retry;
-	else if (rc != 0)
+	rc = get_dma_id(device);
+	if (rc != 0) {
+		kfree(idr_ref);
 		return rc;
+	}
+
+	atomic_set(idr_ref, 0);
 
 	/* represent channels in sysfs. Probably want devs too */
 	list_for_each_entry(chan, &device->channels, device_node) {
+		rc = -ENOMEM;
 		chan->local = alloc_percpu(typeof(*chan->local));
 		if (chan->local == NULL)
-			continue;
+			goto err_out;
 		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
 		if (chan->dev == NULL) {
 			free_percpu(chan->local);
-			continue;
+			chan->local = NULL;
+			goto err_out;
 		}
 
 		chan->chan_id = chancnt++;
@@ -677,6 +693,8 @@ int dma_async_device_register(struct dma_device *device)
 		if (rc) {
 			free_percpu(chan->local);
 			chan->local = NULL;
+			kfree(chan->dev);
+			atomic_dec(idr_ref);
 			goto err_out;
 		}
 		chan->client_count = 0;
@@ -707,6 +725,15 @@ int dma_async_device_register(struct dma_device *device)
 	return 0;
 
  err_out:
+	/* if we never registered a channel just release the idr */
+	if (atomic_read(idr_ref) == 0) {
+		mutex_lock(&dma_list_mutex);
+		idr_remove(&dma_idr, device->dev_id);
+		mutex_unlock(&dma_list_mutex);
+		kfree(idr_ref);
+		return rc;
+	}
+
 	list_for_each_entry(chan, &device->channels, device_node) {
 		if (chan->local == NULL)
 			continue;
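
With this change a channel set-up failure propagates to the caller, so a driver's probe path has to treat dma_async_device_register() as fallible. A minimal sketch, with hypothetical names (mydev, my_free_resources) standing in for a real driver:

	/* hypothetical driver probe fragment -- not part of this patch */
	err = dma_async_device_register(&mydev->common);
	if (err) {
		dev_err(mydev->dev, "failed to register DMA device: %d\n", err);
		my_free_resources(mydev);	/* assumed driver-specific teardown */
		return err;
	}

Internally, the new err_out path uses idr_ref to tell the two failure classes apart: if no channel ever reached device_register() the count is still zero, so only the idr entry and the reference counter need to be released; otherwise the existing per-channel unwind below it runs.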