author		Dan Williams <dan.j.williams@intel.com>	2009-01-06 13:38:21 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2009-01-06 13:38:21 -0500
commit		41d5e59c1299f27983977bcfe3b360600996051c
tree		f0e80b6fea3af04f266843af97f433198ad535c7 /drivers/dma/dmaengine.c
parent		4fac7fa57cf8001be259688468c825f836daf739
dmaengine: add a release for dma class devices and dependent infrastructure
Resolves:
WARNING: at drivers/base/core.c:122 device_release+0x4d/0x52()
Device 'dma0chan0' does not have a release() function, it is broken and must be fixed.
The dma_chan_dev object is introduced to gear-match sysfs kobject and
dmaengine channel lifetimes. When a channel is removed, access to its
sysfs entries returns -ENODEV until the kobject can be released.
The bulk of the change is updates to existing code to handle the extra
layer of indirection between a dma_chan and its struct device.
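The container type itself lives on the header side of this change
(include/linux/dmaengine.h), which this diffstat-limited view does not
show. Sketching it from the usage below (the member names are confirmed
by the container_of() calls; the exact kernel-doc is not reproduced
here), it pairs the embedded sysfs device with a back-pointer to the
channel, and a dma_chan_name() helper replaces the old
dev_name(&chan->dev) idiom:

struct dma_chan_dev {
	struct dma_chan *chan;		/* NULLed at unregister time */
	struct device device;		/* embedded sysfs device node */
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}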
Reported-by: Alexander Beregalov <a.beregalov@gmail.com>
Acked-by: Stephen Hemminger <shemminger@vyatta.com>
Cc: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--	drivers/dma/dmaengine.c	106
1 file changed, 83 insertions(+), 23 deletions(-)
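To see why the extra indirection fixes the warning, the following is a
minimal, self-contained userspace model of the lifetime gear-match
(plain C with pthreads; every name in it is an illustrative stand-in,
not the kernel API):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for struct dma_chan and struct dma_chan_dev; these model
 * only the lifetime split, they are not the kernel structures. */
struct chan {
	int client_count;
};

struct chan_dev {
	struct chan *chan;	/* back-pointer, NULLed at unregister */
	int refs;		/* models the sysfs kobject refcount */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Models a sysfs show method: live data, or -ENODEV once the channel
 * behind the device node is gone. */
static int show_in_use(struct chan_dev *cd, char *buf, size_t len)
{
	int err;

	pthread_mutex_lock(&list_lock);
	if (cd->chan)
		err = snprintf(buf, len, "%d\n", cd->chan->client_count);
	else
		err = -ENODEV;
	pthread_mutex_unlock(&list_lock);
	return err;
}

/* Models chan_dev_release(): the container is freed only when the
 * last reference drops, possibly long after the channel went away. */
static void chan_dev_put(struct chan_dev *cd)
{
	int last;

	pthread_mutex_lock(&list_lock);
	last = (--cd->refs == 0);
	pthread_mutex_unlock(&list_lock);
	if (last)
		free(cd);
}

/* Models dma_async_device_unregister(): detach the channel under the
 * lock, then drop the driver's reference on the container. */
static void unregister_chan(struct chan_dev *cd)
{
	pthread_mutex_lock(&list_lock);
	cd->chan = NULL;
	pthread_mutex_unlock(&list_lock);
	chan_dev_put(cd);
}

int main(void)
{
	struct chan c = { .client_count = 1 };
	struct chan_dev *cd = calloc(1, sizeof(*cd));
	char buf[16];

	if (!cd)
		return 1;
	cd->chan = &c;
	cd->refs = 2;	/* driver ref plus a lingering "sysfs" ref */

	printf("live:    %d\n", show_in_use(cd, buf, sizeof(buf)));
	unregister_chan(cd);	/* channel gone, device node lingers */
	printf("removed: %d\n", show_in_use(cd, buf, sizeof(buf)));

	chan_dev_put(cd);	/* last reference: container freed here */
	return 0;
}

A reader racing with unregister_chan() sees -ENODEV instead of
dereferencing freed memory, which is what the locked dev_to_dma_chan()
lookup in the diff below buys the sysfs attributes.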
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index cdc8ecfc2c2c..93c4c9ac8997 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -64,36 +64,75 @@ static long dmaengine_ref_count;
 
 /* --- sysfs implementation --- */
 
+/**
+ * dev_to_dma_chan - convert a device pointer to its sysfs container object
+ * @dev - device node
+ *
+ * Must be called under dma_list_mutex
+ */
+static struct dma_chan *dev_to_dma_chan(struct device *dev)
+{
+	struct dma_chan_dev *chan_dev;
+
+	chan_dev = container_of(dev, typeof(*chan_dev), device);
+	return chan_dev->chan;
+}
+
 static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct dma_chan *chan = to_dma_chan(dev);
+	struct dma_chan *chan;
 	unsigned long count = 0;
 	int i;
+	int err;
 
-	for_each_possible_cpu(i)
-		count += per_cpu_ptr(chan->local, i)->memcpy_count;
+	mutex_lock(&dma_list_mutex);
+	chan = dev_to_dma_chan(dev);
+	if (chan) {
+		for_each_possible_cpu(i)
+			count += per_cpu_ptr(chan->local, i)->memcpy_count;
+		err = sprintf(buf, "%lu\n", count);
+	} else
+		err = -ENODEV;
+	mutex_unlock(&dma_list_mutex);
 
-	return sprintf(buf, "%lu\n", count);
+	return err;
 }
 
 static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
 				      char *buf)
 {
-	struct dma_chan *chan = to_dma_chan(dev);
+	struct dma_chan *chan;
 	unsigned long count = 0;
 	int i;
+	int err;
 
-	for_each_possible_cpu(i)
-		count += per_cpu_ptr(chan->local, i)->bytes_transferred;
+	mutex_lock(&dma_list_mutex);
+	chan = dev_to_dma_chan(dev);
+	if (chan) {
+		for_each_possible_cpu(i)
+			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
+		err = sprintf(buf, "%lu\n", count);
+	} else
+		err = -ENODEV;
+	mutex_unlock(&dma_list_mutex);
 
-	return sprintf(buf, "%lu\n", count);
+	return err;
 }
 
 static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct dma_chan *chan = to_dma_chan(dev);
+	struct dma_chan *chan;
+	int err;
 
-	return sprintf(buf, "%d\n", chan->client_count);
+	mutex_lock(&dma_list_mutex);
+	chan = dev_to_dma_chan(dev);
+	if (chan)
+		err = sprintf(buf, "%d\n", chan->client_count);
+	else
+		err = -ENODEV;
+	mutex_unlock(&dma_list_mutex);
+
+	return err;
 }
 
 static struct device_attribute dma_attrs[] = {
@@ -103,9 +142,18 @@ static struct device_attribute dma_attrs[] = {
 	__ATTR_NULL
 };
 
+static void chan_dev_release(struct device *dev)
+{
+	struct dma_chan_dev *chan_dev;
+
+	chan_dev = container_of(dev, typeof(*chan_dev), device);
+	kfree(chan_dev);
+}
+
 static struct class dma_devclass = {
 	.name		= "dma",
 	.dev_attrs	= dma_attrs,
+	.dev_release	= chan_dev_release,
 };
 
 /* --- client and device registration --- */
@@ -420,7 +468,7 @@ static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_devic
 	list_for_each_entry(chan, &dev->channels, device_node) {
 		if (chan->client_count) {
 			pr_debug("%s: %s busy\n",
-				 __func__, dev_name(&chan->dev));
+				 __func__, dma_chan_name(chan));
 			continue;
 		}
 		ret = chan;
@@ -466,22 +514,22 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 
 			if (err == -ENODEV) {
 				pr_debug("%s: %s module removed\n", __func__,
-					 dev_name(&chan->dev));
+					 dma_chan_name(chan));
 				list_del_rcu(&device->global_node);
 			} else if (err)
 				pr_err("dmaengine: failed to get %s: (%d)\n",
-				       dev_name(&chan->dev), err);
+				       dma_chan_name(chan), err);
 			else
 				break;
 		} else
 			pr_debug("%s: %s filter said false\n",
-				 __func__, dev_name(&chan->dev));
+				 __func__, dma_chan_name(chan));
 		chan = NULL;
 	}
 	mutex_unlock(&dma_list_mutex);
 
 	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
-		 chan ? dev_name(&chan->dev) : NULL);
+		 chan ? dma_chan_name(chan) : NULL);
 
 	return chan;
 }
@@ -521,7 +569,7 @@ void dmaengine_get(void)
 				break;
 			} else if (err)
 				pr_err("dmaengine: failed to get %s: (%d)\n",
-				       dev_name(&chan->dev), err);
+				       dma_chan_name(chan), err);
 		}
 	}
 
@@ -601,14 +649,20 @@ int dma_async_device_register(struct dma_device *device)
 		chan->local = alloc_percpu(typeof(*chan->local));
 		if (chan->local == NULL)
 			continue;
+		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+		if (chan->dev == NULL) {
+			free_percpu(chan->local);
+			continue;
+		}
 
 		chan->chan_id = chancnt++;
-		chan->dev.class = &dma_devclass;
-		chan->dev.parent = device->dev;
-		dev_set_name(&chan->dev, "dma%dchan%d",
+		chan->dev->device.class = &dma_devclass;
+		chan->dev->device.parent = device->dev;
+		chan->dev->chan = chan;
+		dev_set_name(&chan->dev->device, "dma%dchan%d",
 			     device->dev_id, chan->chan_id);
 
-		rc = device_register(&chan->dev);
+		rc = device_register(&chan->dev->device);
 		if (rc) {
 			free_percpu(chan->local);
 			chan->local = NULL;
@@ -645,7 +699,10 @@ err_out:
 	list_for_each_entry(chan, &device->channels, device_node) {
 		if (chan->local == NULL)
 			continue;
-		device_unregister(&chan->dev);
+		mutex_lock(&dma_list_mutex);
+		chan->dev->chan = NULL;
+		mutex_unlock(&dma_list_mutex);
+		device_unregister(&chan->dev->device);
 		free_percpu(chan->local);
 	}
 	return rc;
@@ -672,7 +729,10 @@ void dma_async_device_unregister(struct dma_device *device)
 		WARN_ONCE(chan->client_count,
 			  "%s called while %d clients hold a reference\n",
 			  __func__, chan->client_count);
-		device_unregister(&chan->dev);
+		mutex_lock(&dma_list_mutex);
+		chan->dev->chan = NULL;
+		mutex_unlock(&dma_list_mutex);
+		device_unregister(&chan->dev->device);
 	}
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
@@ -845,7 +905,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 		return DMA_SUCCESS;
 
 	WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
-		  " %s\n", __func__, dev_name(&tx->chan->dev));
+		  " %s\n", __func__, dma_chan_name(tx->chan));
 
 	/* poll through the dependency chain, return when tx is complete */
 	do {