author    Dan Williams <dan.j.williams@intel.com>  2009-01-06 13:38:17 -0500
committer Dan Williams <dan.j.williams@intel.com>  2009-01-06 13:38:17 -0500
commit    aa1e6f1a385eb2b04171ec841f3b760091e4a8ee (patch)
tree      1401e7f1e867e5d4a769b648605e0317d25d5ccb /drivers/dma/dmaengine.c
parent    209b84a88fe81341b4d8d465acc4a67cb7c3feb3 (diff)
dmaengine: kill struct dma_client and supporting infrastructure
All users have been converted to either the general-purpose allocator,
dma_find_channel, or dma_request_channel.

Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
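For readers following the conversion, a minimal sketch of the replacement pattern, assuming a hypothetical consumer (the example_* names are illustrative, not from this patch): instead of registering a struct dma_client and reacting to DMA_RESOURCE_AVAILABLE callbacks, a user now requests and releases a channel directly.

#include <linux/dmaengine.h>

/* Hypothetical consumer: acquire a memcpy-capable channel with
 * dma_request_channel() instead of a dma_client event callback. */
static struct dma_chan *example_get_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* a NULL filter accepts any public channel matching the mask */
	return dma_request_channel(mask, NULL, NULL);
}

/* ... and hand the channel back when finished */
static void example_put_chan(struct dma_chan *chan)
{
	dma_release_channel(chan);
}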
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--  drivers/dma/dmaengine.c | 74
1 file changed, 3 insertions(+), 71 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 3f1849b7f5e..9fc91f973a9 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -31,15 +31,12 @@
  *
  * LOCKING:
  *
- * The subsystem keeps two global lists, dma_device_list and dma_client_list.
- * Both of these are protected by a mutex, dma_list_mutex.
+ * The subsystem keeps a global list of dma_device structs it is protected by a
+ * mutex, dma_list_mutex.
  *
  * Each device has a channels list, which runs unlocked but is never modified
  * once the device is registered, it's just setup by the driver.
  *
- * Each client is responsible for keeping track of the channels it uses. See
- * the definition of dma_event_callback in dmaengine.h.
- *
  * Each device has a kref, which is initialized to 1 when the device is
  * registered. A kref_get is done for each device registered. When the
  * device is released, the corresponding kref_put is done in the release
@@ -74,7 +71,6 @@
 
 static DEFINE_MUTEX(dma_list_mutex);
 static LIST_HEAD(dma_device_list);
-static LIST_HEAD(dma_client_list);
 static long dmaengine_ref_count;
 
 /* --- sysfs implementation --- */
@@ -189,7 +185,7 @@ static int dma_chan_get(struct dma_chan *chan)
 
 	/* allocate upon first client reference */
 	if (chan->client_count == 1 && err == 0) {
-		int desc_cnt = chan->device->device_alloc_chan_resources(chan, NULL);
+		int desc_cnt = chan->device->device_alloc_chan_resources(chan);
 
 		if (desc_cnt < 0) {
 			err = desc_cnt;
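The hunk above drops the dma_client argument from the allocation hook, so each driver's implementation shrinks to a single-parameter prototype. A hedged sketch of the post-patch driver side (example_* is hypothetical):

/* Hypothetical driver hook after this change: no dma_client parameter.
 * Returns the number of descriptors allocated, or a negative errno. */
static int example_alloc_chan_resources(struct dma_chan *chan)
{
	/* set up per-channel descriptors here */
	return 16;	/* e.g. sixteen descriptors ready for use */
}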
@@ -218,40 +214,6 @@ static void dma_chan_put(struct dma_chan *chan)
 		chan->device->device_free_chan_resources(chan);
 }
 
-/**
- * dma_client_chan_alloc - try to allocate channels to a client
- * @client: &dma_client
- *
- * Called with dma_list_mutex held.
- */
-static void dma_client_chan_alloc(struct dma_client *client)
-{
-	struct dma_device *device;
-	struct dma_chan *chan;
-	enum dma_state_client ack;
-
-	/* Find a channel */
-	list_for_each_entry(device, &dma_device_list, global_node) {
-		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
-			continue;
-		if (!dma_device_satisfies_mask(device, client->cap_mask))
-			continue;
-
-		list_for_each_entry(chan, &device->channels, device_node) {
-			if (!chan->client_count)
-				continue;
-			ack = client->event_callback(client, chan,
-					DMA_RESOURCE_AVAILABLE);
-
-			/* we are done once this client rejects
-			 * an available resource
-			 */
-			if (ack == DMA_NAK)
-				return;
-		}
-	}
-}
-
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 {
 	enum dma_status status;
@@ -585,21 +547,6 @@ void dma_release_channel(struct dma_chan *chan)
 EXPORT_SYMBOL_GPL(dma_release_channel);
 
 /**
- * dma_chans_notify_available - broadcast available channels to the clients
- */
-static void dma_clients_notify_available(void)
-{
-	struct dma_client *client;
-
-	mutex_lock(&dma_list_mutex);
-
-	list_for_each_entry(client, &dma_client_list, global_node)
-		dma_client_chan_alloc(client);
-
-	mutex_unlock(&dma_list_mutex);
-}
-
-/**
  * dmaengine_get - register interest in dma_channels
  */
 void dmaengine_get(void)
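dmaengine_get(), kept in context above, is the entry point of the general-purpose allocator mentioned in the commit message: opt-in users take a subsystem reference and then look channels up with dma_find_channel() rather than waiting for client callbacks. A minimal sketch, assuming a hypothetical caller (example_* is illustrative):

/* Hypothetical opt-in user of the general-purpose allocator */
static void example_issue_copies(void)
{
	struct dma_chan *chan;

	dmaengine_get();			/* register interest in channels */

	chan = dma_find_channel(DMA_MEMCPY);	/* may be NULL if none present */
	if (chan) {
		/* submit memcpy operations on chan here */
	}

	dmaengine_put();			/* drop interest */
}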
@@ -660,19 +607,6 @@ void dmaengine_put(void)
 EXPORT_SYMBOL(dmaengine_put);
 
 /**
- * dma_async_client_chan_request - send all available channels to the
- * client that satisfy the capability mask
- * @client - requester
- */
-void dma_async_client_chan_request(struct dma_client *client)
-{
-	mutex_lock(&dma_list_mutex);
-	dma_client_chan_alloc(client);
-	mutex_unlock(&dma_list_mutex);
-}
-EXPORT_SYMBOL(dma_async_client_chan_request);
-
-/**
  * dma_async_device_register - registers DMA devices found
  * @device: &dma_device
  */
@@ -765,8 +699,6 @@ int dma_async_device_register(struct dma_device *device)
 	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
 
-	dma_clients_notify_available();
-
 	return 0;
 
 err_out: