Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/dmaengine.c  74
-rw-r--r--  drivers/dma/dw_dmac.c     3
-rw-r--r--  drivers/dma/fsldma.c      3
-rw-r--r--  drivers/dma/ioat_dma.c    5
-rw-r--r--  drivers/dma/iop-adma.c    7
-rw-r--r--  drivers/dma/mv_xor.c      7
6 files changed, 13 insertions, 86 deletions
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 3f1849b7f5ef..9fc91f973a9a 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -31,15 +31,12 @@
  *
  * LOCKING:
  *
- * The subsystem keeps two global lists, dma_device_list and dma_client_list.
- * Both of these are protected by a mutex, dma_list_mutex.
+ * The subsystem keeps a global list of dma_device structs it is protected by a
+ * mutex, dma_list_mutex.
  *
  * Each device has a channels list, which runs unlocked but is never modified
  * once the device is registered, it's just setup by the driver.
  *
- * Each client is responsible for keeping track of the channels it uses. See
- * the definition of dma_event_callback in dmaengine.h.
- *
  * Each device has a kref, which is initialized to 1 when the device is
  * registered. A kref_get is done for each device registered. When the
  * device is released, the corresponding kref_put is done in the release
@@ -74,7 +71,6 @@
 
 static DEFINE_MUTEX(dma_list_mutex);
 static LIST_HEAD(dma_device_list);
-static LIST_HEAD(dma_client_list);
 static long dmaengine_ref_count;
 
 /* --- sysfs implementation --- */
@@ -189,7 +185,7 @@ static int dma_chan_get(struct dma_chan *chan)
 
 	/* allocate upon first client reference */
 	if (chan->client_count == 1 && err == 0) {
-		int desc_cnt = chan->device->device_alloc_chan_resources(chan, NULL);
+		int desc_cnt = chan->device->device_alloc_chan_resources(chan);
 
 		if (desc_cnt < 0) {
 			err = desc_cnt;
@@ -218,40 +214,6 @@ static void dma_chan_put(struct dma_chan *chan)
 		chan->device->device_free_chan_resources(chan);
 }
 
-/**
- * dma_client_chan_alloc - try to allocate channels to a client
- * @client: &dma_client
- *
- * Called with dma_list_mutex held.
- */
-static void dma_client_chan_alloc(struct dma_client *client)
-{
-	struct dma_device *device;
-	struct dma_chan *chan;
-	enum dma_state_client ack;
-
-	/* Find a channel */
-	list_for_each_entry(device, &dma_device_list, global_node) {
-		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
-			continue;
-		if (!dma_device_satisfies_mask(device, client->cap_mask))
-			continue;
-
-		list_for_each_entry(chan, &device->channels, device_node) {
-			if (!chan->client_count)
-				continue;
-			ack = client->event_callback(client, chan,
-						     DMA_RESOURCE_AVAILABLE);
-
-			/* we are done once this client rejects
-			 * an available resource
-			 */
-			if (ack == DMA_NAK)
-				return;
-		}
-	}
-}
-
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 {
 	enum dma_status status;
@@ -585,21 +547,6 @@ void dma_release_channel(struct dma_chan *chan)
 EXPORT_SYMBOL_GPL(dma_release_channel);
 
 /**
- * dma_chans_notify_available - broadcast available channels to the clients
- */
-static void dma_clients_notify_available(void)
-{
-	struct dma_client *client;
-
-	mutex_lock(&dma_list_mutex);
-
-	list_for_each_entry(client, &dma_client_list, global_node)
-		dma_client_chan_alloc(client);
-
-	mutex_unlock(&dma_list_mutex);
-}
-
-/**
  * dmaengine_get - register interest in dma_channels
  */
 void dmaengine_get(void)
@@ -660,19 +607,6 @@ void dmaengine_put(void)
 EXPORT_SYMBOL(dmaengine_put);
 
 /**
- * dma_async_client_chan_request - send all available channels to the
- * client that satisfy the capability mask
- * @client - requester
- */
-void dma_async_client_chan_request(struct dma_client *client)
-{
-	mutex_lock(&dma_list_mutex);
-	dma_client_chan_alloc(client);
-	mutex_unlock(&dma_list_mutex);
-}
-EXPORT_SYMBOL(dma_async_client_chan_request);
-
-/**
  * dma_async_device_register - registers DMA devices found
  * @device: &dma_device
  */
@@ -765,8 +699,6 @@ int dma_async_device_register(struct dma_device *device)
 	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
 
-	dma_clients_notify_available();
-
 	return 0;
 
 err_out:
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index dbd50804e5d2..a29dda8f801b 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -758,8 +758,7 @@ static void dwc_issue_pending(struct dma_chan *chan)
 	spin_unlock_bh(&dwc->lock);
 }
 
-static int dwc_alloc_chan_resources(struct dma_chan *chan,
-		struct dma_client *client)
+static int dwc_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(chan->device);
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 0b95dcce447e..46e0128929a0 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -366,8 +366,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
  *
  * Return - The number of descriptors allocated.
  */
-static int fsl_dma_alloc_chan_resources(struct dma_chan *chan,
-					struct dma_client *client)
+static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
 
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 6607fdd00b1c..e42e1aea0f18 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -734,8 +734,7 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
  * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
  * @chan: the channel to be filled out
  */
-static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
-					 struct dma_client *client)
+static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 	struct ioat_desc_sw *desc;
@@ -1381,7 +1380,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	dma_chan = container_of(device->common.channels.next,
 				struct dma_chan,
 				device_node);
-	if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
+	if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
 		dev_err(&device->pdev->dev,
 			"selftest cannot allocate chan resource\n");
 		err = -ENODEV;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index be9ea9f88805..c74ac9eb009a 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -470,8 +470,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
  * greater than 2x the number slots needed to satisfy a device->max_xor
  * request.
  * */
-static int iop_adma_alloc_chan_resources(struct dma_chan *chan,
-					 struct dma_client *client)
+static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 {
 	char *hw_desc;
 	int idx;
@@ -865,7 +864,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
 	dma_chan = container_of(device->common.channels.next,
 				struct dma_chan,
 				device_node);
-	if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
+	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
 		err = -ENODEV;
 		goto out;
 	}
@@ -963,7 +962,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	dma_chan = container_of(device->common.channels.next,
 				struct dma_chan,
 				device_node);
-	if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
+	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
 		err = -ENODEV;
 		goto out;
 	}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 3f46df3390c7..fbaa2f6225e2 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -606,8 +606,7 @@ submit_done:
 }
 
 /* returns the number of allocated descriptors */
-static int mv_xor_alloc_chan_resources(struct dma_chan *chan,
-				       struct dma_client *client)
+static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 {
 	char *hw_desc;
 	int idx;
@@ -957,7 +956,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
 	dma_chan = container_of(device->common.channels.next,
 				struct dma_chan,
 				device_node);
-	if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
+	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
 		err = -ENODEV;
 		goto out;
 	}
@@ -1052,7 +1051,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
 	dma_chan = container_of(device->common.channels.next,
 				struct dma_chan,
 				device_node);
-	if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
+	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
 		err = -ENODEV;
 		goto out;
 	}