author     Atsushi Nemoto <anemo@mba.ocn.ne.jp>       2009-03-06 06:07:14 -0500
committer  Dan Williams <dan.j.williams@intel.com>    2009-03-26 12:48:09 -0400
commit     0f571515c332e00b3515dbe0859ceaa30ab66e00
tree       6d67c555714264dda749e387cee6738f770abc5d /drivers/dma
parent     e44e0aa3cfa97cddff01704751a4b25151830c72
dmaengine: Add privatecnt to revert DMA_PRIVATE property
Currently dma_request_channel() sets the DMA_PRIVATE capability but never
clears it, so once a public channel has been grabbed by
dma_request_channel(), the device stays PRIVATE forever. Add a privatecnt
member to dma_device so the capability can be correctly reverted when the
last privately requested channel is released.
[lg@denx.de: fix bad usage of 'chan' in dma_async_device_register]
Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
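
A minimal, hypothetical client-side sketch (not part of this patch) of the
path this change affects. The names my_filter and example_use_channel are
illustrative only; the calls are the standard dmaengine client API
(dma_cap_zero/dma_cap_set, dma_request_channel, dma_release_channel). With
privatecnt in place, the final dma_release_channel() on a device that was
public before the request clears DMA_PRIVATE again instead of leaving it
set forever:

	#include <linux/dmaengine.h>

	/* Illustrative filter: accept any candidate channel. */
	static bool my_filter(struct dma_chan *chan, void *param)
	{
		return true;
	}

	static int example_use_channel(void)
	{
		dma_cap_mask_t mask;
		struct dma_chan *chan;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		/* __dma_request_channel(): sets DMA_PRIVATE, privatecnt++ */
		chan = dma_request_channel(mask, my_filter, NULL);
		if (!chan)
			return -ENODEV;

		/* ... issue transfers on chan ... */

		/* dma_release_channel(): privatecnt--, and DMA_PRIVATE is
		 * cleared again once the count drops to zero */
		dma_release_channel(chan);
		return 0;
	}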
Diffstat (limited to 'drivers/dma')
 drivers/dma/dmaengine.c | 8 ++++++++
 1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a41d1ea10fa3..92438e9dacc3 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -507,6 +507,7 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 			 * published in the general-purpose allocator
 			 */
 			dma_cap_set(DMA_PRIVATE, device->cap_mask);
+			device->privatecnt++;
 			err = dma_chan_get(chan);
 
 			if (err == -ENODEV) {
@@ -518,6 +519,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 					 dma_chan_name(chan), err);
 			else
 				break;
+			if (--device->privatecnt == 0)
+				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 			chan->private = NULL;
 			chan = NULL;
 		}
@@ -537,6 +540,9 @@ void dma_release_channel(struct dma_chan *chan)
 	WARN_ONCE(chan->client_count != 1,
 		  "chan reference count %d != 1\n", chan->client_count);
 	dma_chan_put(chan);
+	/* drop PRIVATE cap enabled by __dma_request_channel() */
+	if (--chan->device->privatecnt == 0)
+		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
 	chan->private = NULL;
 	mutex_unlock(&dma_list_mutex);
 }
@@ -719,6 +725,8 @@ int dma_async_device_register(struct dma_device *device)
 		}
 	}
 	list_add_tail_rcu(&device->global_node, &dma_device_list);
+	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+		device->privatecnt++; /* Always private */
 	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
 
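
The last hunk covers the provider side: a device registered with DMA_PRIVATE
already set gets privatecnt bumped once at registration, so the count never
reaches zero in dma_release_channel() and such a device stays private. A
hypothetical driver-side sketch of that case (example_register is an
illustrative name, and the usual channel/ops setup of the dma_device is
omitted):

	#include <linux/dmaengine.h>

	static int example_register(struct dma_device *dev)
	{
		dma_cap_set(DMA_MEMCPY, dev->cap_mask);
		/* Marking the device private at registration means its
		 * channels are never published to the general-purpose
		 * allocator; dma_async_device_register() now accounts for
		 * this by taking a permanent privatecnt reference. */
		dma_cap_set(DMA_PRIVATE, dev->cap_mask);

		return dma_async_device_register(dev);
	}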