diff options
author | Atsushi Nemoto <anemo@mba.ocn.ne.jp> | 2009-03-06 06:07:14 -0500 |
---|---|---|
committer | Dan Williams <dan.j.williams@intel.com> | 2009-03-26 12:48:09 -0400 |
commit | 0f571515c332e00b3515dbe0859ceaa30ab66e00 (patch) | |
tree | 6d67c555714264dda749e387cee6738f770abc5d | |
parent | e44e0aa3cfa97cddff01704751a4b25151830c72 (diff) |
dmaengine: Add privatecnt to revert DMA_PRIVATE property
Currently dma_request_channel() sets the DMA_PRIVATE capability but never
clears it. So if a public channel was once grabbed by
dma_request_channel(), the device stays PRIVATE forever. Add a
privatecnt member to dma_device to correctly revert it.
[lg@denx.de: fix bad usage of 'chan' in dma_async_device_register]
Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r-- | drivers/dma/dmaengine.c | 8 | ||||
-rw-r--r-- | include/linux/dmaengine.h | 9 |
2 files changed, 17 insertions, 0 deletions
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index a41d1ea10fa3..92438e9dacc3 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -507,6 +507,7 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v | |||
507 | * published in the general-purpose allocator | 507 | * published in the general-purpose allocator |
508 | */ | 508 | */ |
509 | dma_cap_set(DMA_PRIVATE, device->cap_mask); | 509 | dma_cap_set(DMA_PRIVATE, device->cap_mask); |
510 | device->privatecnt++; | ||
510 | err = dma_chan_get(chan); | 511 | err = dma_chan_get(chan); |
511 | 512 | ||
512 | if (err == -ENODEV) { | 513 | if (err == -ENODEV) { |
@@ -518,6 +519,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v | |||
518 | dma_chan_name(chan), err); | 519 | dma_chan_name(chan), err); |
519 | else | 520 | else |
520 | break; | 521 | break; |
522 | if (--device->privatecnt == 0) | ||
523 | dma_cap_clear(DMA_PRIVATE, device->cap_mask); | ||
521 | chan->private = NULL; | 524 | chan->private = NULL; |
522 | chan = NULL; | 525 | chan = NULL; |
523 | } | 526 | } |
@@ -537,6 +540,9 @@ void dma_release_channel(struct dma_chan *chan) | |||
537 | WARN_ONCE(chan->client_count != 1, | 540 | WARN_ONCE(chan->client_count != 1, |
538 | "chan reference count %d != 1\n", chan->client_count); | 541 | "chan reference count %d != 1\n", chan->client_count); |
539 | dma_chan_put(chan); | 542 | dma_chan_put(chan); |
543 | /* drop PRIVATE cap enabled by __dma_request_channel() */ | ||
544 | if (--chan->device->privatecnt == 0) | ||
545 | dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); | ||
540 | chan->private = NULL; | 546 | chan->private = NULL; |
541 | mutex_unlock(&dma_list_mutex); | 547 | mutex_unlock(&dma_list_mutex); |
542 | } | 548 | } |
@@ -719,6 +725,8 @@ int dma_async_device_register(struct dma_device *device) | |||
719 | } | 725 | } |
720 | } | 726 | } |
721 | list_add_tail_rcu(&device->global_node, &dma_device_list); | 727 | list_add_tail_rcu(&device->global_node, &dma_device_list); |
728 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) | ||
729 | device->privatecnt++; /* Always private */ | ||
722 | dma_channel_rebalance(); | 730 | dma_channel_rebalance(); |
723 | mutex_unlock(&dma_list_mutex); | 731 | mutex_unlock(&dma_list_mutex); |
724 | 732 | ||
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 2afc2c95e42d..2e2aa3df170c 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -202,6 +202,7 @@ struct dma_async_tx_descriptor { | |||
202 | /** | 202 | /** |
203 | * struct dma_device - info on the entity supplying DMA services | 203 | * struct dma_device - info on the entity supplying DMA services |
204 | * @chancnt: how many DMA channels are supported | 204 | * @chancnt: how many DMA channels are supported |
205 | * @privatecnt: how many DMA channels are requested by dma_request_channel | ||
205 | * @channels: the list of struct dma_chan | 206 | * @channels: the list of struct dma_chan |
206 | * @global_node: list_head for global dma_device_list | 207 | * @global_node: list_head for global dma_device_list |
207 | * @cap_mask: one or more dma_capability flags | 208 | * @cap_mask: one or more dma_capability flags |
@@ -224,6 +225,7 @@ struct dma_async_tx_descriptor { | |||
224 | struct dma_device { | 225 | struct dma_device { |
225 | 226 | ||
226 | unsigned int chancnt; | 227 | unsigned int chancnt; |
228 | unsigned int privatecnt; | ||
227 | struct list_head channels; | 229 | struct list_head channels; |
228 | struct list_head global_node; | 230 | struct list_head global_node; |
229 | dma_cap_mask_t cap_mask; | 231 | dma_cap_mask_t cap_mask; |
@@ -352,6 +354,13 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) | |||
352 | set_bit(tx_type, dstp->bits); | 354 | set_bit(tx_type, dstp->bits); |
353 | } | 355 | } |
354 | 356 | ||
357 | #define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask)) | ||
358 | static inline void | ||
359 | __dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) | ||
360 | { | ||
361 | clear_bit(tx_type, dstp->bits); | ||
362 | } | ||
363 | |||
355 | #define dma_cap_zero(mask) __dma_cap_zero(&(mask)) | 364 | #define dma_cap_zero(mask) __dma_cap_zero(&(mask)) |
356 | static inline void __dma_cap_zero(dma_cap_mask_t *dstp) | 365 | static inline void __dma_cap_zero(dma_cap_mask_t *dstp) |
357 | { | 366 | { |