 Documentation/dmaengine/client.txt |  23
 drivers/dma/dmaengine.c            | 172
 drivers/dma/edma.c                 |   4
 drivers/dma/omap-dma.c             |   4
 include/linux/dmaengine.h          |  51
 include/linux/omap-dma.h           |   6
 include/linux/platform_data/edma.h |   7
 7 files changed, 191 insertions(+), 76 deletions(-)
diff --git a/Documentation/dmaengine/client.txt b/Documentation/dmaengine/client.txt
index 11fb87ff6cd0..4b04d8988708 100644
--- a/Documentation/dmaengine/client.txt
+++ b/Documentation/dmaengine/client.txt
@@ -22,25 +22,14 @@ The slave DMA usage consists of following steps:
 Channel allocation is slightly different in the slave DMA context,
 client drivers typically need a channel from a particular DMA
 controller only and even in some cases a specific channel is desired.
-To request a channel dma_request_channel() API is used.
+To request a channel dma_request_chan() API is used.
 
 Interface:
-struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
-		dma_filter_fn filter_fn,
-		void *filter_param);
-where dma_filter_fn is defined as:
-typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
-
-The 'filter_fn' parameter is optional, but highly recommended for
-slave and cyclic channels as they typically need to obtain a specific
-DMA channel.
-
-When the optional 'filter_fn' parameter is NULL, dma_request_channel()
-simply returns the first channel that satisfies the capability mask.
-
-Otherwise, the 'filter_fn' routine will be called once for each free
-channel which has a capability in 'mask'. 'filter_fn' is expected to
-return 'true' when the desired DMA channel is found.
+struct dma_chan *dma_request_chan(struct device *dev, const char *name);
+
+Which will find and return the 'name' DMA channel associated with the 'dev'
+device. The association is done via DT, ACPI or board file based
+dma_slave_map matching table.
 
 A channel allocated via this interface is exclusive to the caller,
 until dma_release_channel() is called.
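
As an illustrative sketch (the "rx" channel name and the pdev pointer are
hypothetical), a client driver would typically request and release a channel
like this:

	struct dma_chan *chan;

	chan = dma_request_chan(&pdev->dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER; propagate it */

	/* ... configure and issue slave transfers on chan ... */

	dma_release_channel(chan);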
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 4aced6689734..21c8c0bce3af 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -43,6 +43,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -512,7 +513,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 {
 	struct dma_chan *chan;
 
-	if (!__dma_device_satisfies_mask(dev, mask)) {
+	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
 		pr_debug("%s: wrong capabilities\n", __func__);
 		return NULL;
 	}
@@ -543,6 +544,42 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 	return NULL;
 }
 
+static struct dma_chan *find_candidate(struct dma_device *device,
+				       const dma_cap_mask_t *mask,
+				       dma_filter_fn fn, void *fn_param)
+{
+	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
+	int err;
+
+	if (chan) {
+		/* Found a suitable channel, try to grab, prep, and return it.
+		 * We first set DMA_PRIVATE to disable balance_ref_count as this
+		 * channel will not be published in the general-purpose
+		 * allocator
+		 */
+		dma_cap_set(DMA_PRIVATE, device->cap_mask);
+		device->privatecnt++;
+		err = dma_chan_get(chan);
+
+		if (err) {
+			if (err == -ENODEV) {
+				pr_debug("%s: %s module removed\n", __func__,
+					 dma_chan_name(chan));
+				list_del_rcu(&device->global_node);
+			} else
+				pr_debug("%s: failed to get %s: (%d)\n",
+					 __func__, dma_chan_name(chan), err);
+
+			if (--device->privatecnt == 0)
+				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+
+			chan = ERR_PTR(err);
+		}
+	}
+
+	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
+}
+
 /**
  * dma_get_slave_channel - try to get specific channel exclusively
  * @chan: target channel
@@ -581,7 +618,6 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
 {
 	dma_cap_mask_t mask;
 	struct dma_chan *chan;
-	int err;
 
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
@@ -589,23 +625,11 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
 	/* lock against __dma_request_channel */
 	mutex_lock(&dma_list_mutex);
 
-	chan = private_candidate(&mask, device, NULL, NULL);
-	if (chan) {
-		dma_cap_set(DMA_PRIVATE, device->cap_mask);
-		device->privatecnt++;
-		err = dma_chan_get(chan);
-		if (err) {
-			pr_debug("%s: failed to get %s: (%d)\n",
-				 __func__, dma_chan_name(chan), err);
-			chan = NULL;
-			if (--device->privatecnt == 0)
-				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
-		}
-	}
+	chan = find_candidate(device, &mask, NULL, NULL);
 
 	mutex_unlock(&dma_list_mutex);
 
-	return chan;
+	return IS_ERR(chan) ? NULL : chan;
 }
 EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
 
@@ -622,35 +646,15 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 {
 	struct dma_device *device, *_d;
 	struct dma_chan *chan = NULL;
-	int err;
 
 	/* Find a channel */
 	mutex_lock(&dma_list_mutex);
 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
-		chan = private_candidate(mask, device, fn, fn_param);
-		if (chan) {
-			/* Found a suitable channel, try to grab, prep, and
-			 * return it.  We first set DMA_PRIVATE to disable
-			 * balance_ref_count as this channel will not be
-			 * published in the general-purpose allocator
-			 */
-			dma_cap_set(DMA_PRIVATE, device->cap_mask);
-			device->privatecnt++;
-			err = dma_chan_get(chan);
+		chan = find_candidate(device, mask, fn, fn_param);
+		if (!IS_ERR(chan))
+			break;
 
-			if (err == -ENODEV) {
-				pr_debug("%s: %s module removed\n",
-					 __func__, dma_chan_name(chan));
-				list_del_rcu(&device->global_node);
-			} else if (err)
-				pr_debug("%s: failed to get %s: (%d)\n",
-					 __func__, dma_chan_name(chan), err);
-			else
-				break;
-			if (--device->privatecnt == 0)
-				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
-			chan = NULL;
-		}
+		chan = NULL;
 	}
 	mutex_unlock(&dma_list_mutex);
 
@@ -663,27 +667,73 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 }
 EXPORT_SYMBOL_GPL(__dma_request_channel);
 
+static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
+						    const char *name,
+						    struct device *dev)
+{
+	int i;
+
+	if (!device->filter.mapcnt)
+		return NULL;
+
+	for (i = 0; i < device->filter.mapcnt; i++) {
+		const struct dma_slave_map *map = &device->filter.map[i];
+
+		if (!strcmp(map->devname, dev_name(dev)) &&
+		    !strcmp(map->slave, name))
+			return map;
+	}
+
+	return NULL;
+}
+
 /**
- * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
+ * dma_request_chan - try to allocate an exclusive slave channel
  * @dev:	pointer to client device structure
  * @name:	slave channel name
  *
  * Returns pointer to appropriate DMA channel on success or an error pointer.
  */
-struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
-						  const char *name)
+struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 {
+	struct dma_device *d, *_d;
+	struct dma_chan *chan = NULL;
+
 	/* If device-tree is present get slave info from here */
 	if (dev->of_node)
-		return of_dma_request_slave_channel(dev->of_node, name);
+		chan = of_dma_request_slave_channel(dev->of_node, name);
 
 	/* If device was enumerated by ACPI get slave info from here */
-	if (ACPI_HANDLE(dev))
-		return acpi_dma_request_slave_chan_by_name(dev, name);
+	if (has_acpi_companion(dev) && !chan)
+		chan = acpi_dma_request_slave_chan_by_name(dev, name);
 
-	return ERR_PTR(-ENODEV);
+	if (chan) {
+		/* Valid channel found or requester needs to be deferred */
+		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
+			return chan;
+	}
+
+	/* Try to find the channel via the DMA filter map(s) */
+	mutex_lock(&dma_list_mutex);
+	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
+		dma_cap_mask_t mask;
+		const struct dma_slave_map *map = dma_filter_match(d, name, dev);
+
+		if (!map)
+			continue;
+
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+
+		chan = find_candidate(d, &mask, d->filter.fn, map->param);
+		if (!IS_ERR(chan))
+			break;
+	}
+	mutex_unlock(&dma_list_mutex);
+
+	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 }
-EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
+EXPORT_SYMBOL_GPL(dma_request_chan);
 
 /**
  * dma_request_slave_channel - try to allocate an exclusive slave channel
@@ -695,17 +745,35 @@ EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
 struct dma_chan *dma_request_slave_channel(struct device *dev,
 					   const char *name)
 {
-	struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
+	struct dma_chan *ch = dma_request_chan(dev, name);
 	if (IS_ERR(ch))
 		return NULL;
 
-	dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
-	ch->device->privatecnt++;
-
 	return ch;
 }
 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
 
+/**
+ * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
+ * @mask: capabilities that the channel must satisfy
+ *
+ * Returns pointer to appropriate DMA channel on success or an error pointer.
+ */
+struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
+{
+	struct dma_chan *chan;
+
+	if (!mask)
+		return ERR_PTR(-ENODEV);
+
+	chan = __dma_request_channel(mask, NULL, NULL);
+	if (!chan)
+		chan = ERR_PTR(-ENODEV);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
+
 void dma_release_channel(struct dma_chan *chan)
 {
 	mutex_lock(&dma_list_mutex);
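
For reference, a minimal sketch of the new dma_request_chan_by_mask() in use,
requesting any channel that can do memcpy (the DMA_MEMCPY use case is
illustrative):

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* -ENODEV if no channel satisfies the mask */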
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 5317ae642d1c..6b3e9d991010 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -2297,6 +2297,10 @@ static int edma_probe(struct platform_device *pdev)
 		edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
 	}
 
+	ecc->dma_slave.filter.map = info->slave_map;
+	ecc->dma_slave.filter.mapcnt = info->slavecnt;
+	ecc->dma_slave.filter.fn = edma_filter_fn;
+
 	ret = dma_async_device_register(&ecc->dma_slave);
 	if (ret) {
 		dev_err(dev, "slave ddev registration failed (%d)\n", ret);
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 1dfc71c90123..48f77c289cd3 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1203,6 +1203,10 @@ static int omap_dma_probe(struct platform_device *pdev)
 		return rc;
 	}
 
+	od->ddev.filter.map = od->plat->slave_map;
+	od->ddev.filter.mapcnt = od->plat->slavecnt;
+	od->ddev.filter.fn = omap_dma_filter_fn;
+
 	rc = dma_async_device_register(&od->ddev);
 	if (rc) {
 		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 6f94b5cbd97c..8ab3bafc2332 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -607,11 +607,38 @@ enum dmaengine_alignment {
 };
 
 /**
+ * struct dma_slave_map - associates slave device and its slave channel with
+ * parameter to be used by a filter function
+ * @devname: name of the device
+ * @slave: slave channel name
+ * @param: opaque parameter to pass to struct dma_filter.fn
+ */
+struct dma_slave_map {
+	const char *devname;
+	const char *slave;
+	void *param;
+};
+
+/**
+ * struct dma_filter - information for slave device/channel to filter_fn/param
+ * mapping
+ * @fn: filter function callback
+ * @mapcnt: number of slave device/channel in the map
+ * @map: array of channel to filter mapping data
+ */
+struct dma_filter {
+	dma_filter_fn fn;
+	int mapcnt;
+	const struct dma_slave_map *map;
+};
+
+/**
  * struct dma_device - info on the entity supplying DMA services
  * @chancnt: how many DMA channels are supported
  * @privatecnt: how many DMA channels are requested by dma_request_channel
  * @channels: the list of struct dma_chan
  * @global_node: list_head for global dma_device_list
+ * @filter: information for device/slave to filter function/param mapping
  * @cap_mask: one or more dma_capability flags
  * @max_xor: maximum number of xor sources, 0 if no capability
  * @max_pq: maximum number of PQ sources and PQ-continue capability
@@ -667,6 +694,7 @@ struct dma_device {
 	unsigned int privatecnt;
 	struct list_head channels;
 	struct list_head global_node;
+	struct dma_filter filter;
 	dma_cap_mask_t cap_mask;
 	unsigned short max_xor;
 	unsigned short max_pq;
@@ -1142,9 +1170,11 @@ enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 				       dma_filter_fn fn, void *fn_param);
-struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
-						  const char *name);
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
+
+struct dma_chan *dma_request_chan(struct device *dev, const char *name);
+struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
+
 void dma_release_channel(struct dma_chan *chan);
 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
 #else
@@ -1168,16 +1198,21 @@ static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 {
 	return NULL;
 }
-static inline struct dma_chan *dma_request_slave_channel_reason(
-					struct device *dev, const char *name)
-{
-	return ERR_PTR(-ENODEV);
-}
 static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
 							  const char *name)
 {
 	return NULL;
 }
+static inline struct dma_chan *dma_request_chan(struct device *dev,
+						const char *name)
+{
+	return ERR_PTR(-ENODEV);
+}
+static inline struct dma_chan *dma_request_chan_by_mask(
+					const dma_cap_mask_t *mask)
+{
+	return ERR_PTR(-ENODEV);
+}
 static inline void dma_release_channel(struct dma_chan *chan)
 {
 }
@@ -1188,6 +1223,8 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
 }
 #endif
 
+#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
+
 static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
 {
 	struct dma_slave_caps caps;
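
To sketch how the new pieces fit together (device names and filter parameters
are hypothetical): a board file defines the per-device map, the DMA controller
driver points dma_device.filter at it before registration, and
dma_request_chan() matches entries on dev_name(dev) plus the channel name:

	static int uart_rx_req = 5;	/* controller-specific request line */
	static int uart_tx_req = 6;

	static const struct dma_slave_map board_dma_map[] = {
		{ "soc-uart.0", "rx", &uart_rx_req },
		{ "soc-uart.0", "tx", &uart_tx_req },
	};

	/* in the controller driver, before dma_async_device_register():
	 *	ddev->filter.map = board_dma_map (from platform data);
	 *	ddev->filter.mapcnt = ARRAY_SIZE(board_dma_map);
	 *	ddev->filter.fn = the driver's existing filter function;
	 */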
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index 88fa8af2b937..1d99b61adc65 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -267,6 +267,9 @@ struct omap_dma_reg {
 	u8 type;
 };
 
+#define SDMA_FILTER_PARAM(hw_req)	((int[]) { (hw_req) })
+struct dma_slave_map;
+
 /* System DMA platform data structure */
 struct omap_system_dma_plat_info {
 	const struct omap_dma_reg *reg_map;
@@ -278,6 +281,9 @@ struct omap_system_dma_plat_info {
 	void (*clear_dma)(int lch);
 	void (*dma_write)(u32 val, int reg, int lch);
 	u32 (*dma_read)(int reg, int lch);
+
+	const struct dma_slave_map *slave_map;
+	int slavecnt;
 };
 
 #ifdef CONFIG_ARCH_OMAP2PLUS
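
A board file could then describe its SDMA request lines roughly as follows
(device names and request-line numbers are hypothetical):

	static const struct dma_slave_map omap1_dma_map[] = {
		{ "omap-uart.0", "rx", SDMA_FILTER_PARAM(49) },
		{ "omap-uart.0", "tx", SDMA_FILTER_PARAM(50) },
	};

	static struct omap_system_dma_plat_info dma_plat_info = {
		/* reg_map, channel ops, etc. as before */
		.slave_map = omap1_dma_map,
		.slavecnt = ARRAY_SIZE(omap1_dma_map),
	};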
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index e2878baeb90e..105700e62ea1 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -53,12 +53,16 @@ enum dma_event_q {
 #define EDMA_CTLR(i)		((i) >> 16)
 #define EDMA_CHAN_SLOT(i)	((i) & 0xffff)
 
+#define EDMA_FILTER_PARAM(ctlr, chan)	((int[]) { EDMA_CTLR_CHAN(ctlr, chan) })
+
 struct edma_rsv_info {
 
 	const s16	(*rsv_chans)[2];
 	const s16	(*rsv_slots)[2];
 };
 
+struct dma_slave_map;
+
 /* platform_data for EDMA driver */
 struct edma_soc_info {
 	/*
@@ -76,6 +80,9 @@ struct edma_soc_info {
 
 	s8	(*queue_priority_mapping)[2];
 	const s16	(*xbar_chans)[2];
+
+	const struct dma_slave_map *slave_map;
+	int slavecnt;
 };
 
 #endif
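
Similarly, an eDMA board file could populate its map with EDMA_FILTER_PARAM(),
which packs the controller and channel numbers into the parameter expected by
edma_filter_fn (names and numbers hypothetical):

	static const struct dma_slave_map board_edma_map[] = {
		{ "davinci-mcasp.0", "rx", EDMA_FILTER_PARAM(0, 0) },
		{ "davinci-mcasp.0", "tx", EDMA_FILTER_PARAM(0, 1) },
	};

	static struct edma_soc_info board_edma_info = {
		/* queue priorities, xbar setup, etc. as before */
		.slave_map = board_edma_map,
		.slavecnt = ARRAY_SIZE(board_edma_map),
	};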