author      Peter Ujfalusi <peter.ujfalusi@ti.com>    2015-12-14 15:47:40 -0500
committer   Vinod Koul <vinod.koul@intel.com>         2015-12-18 00:47:26 -0500
commit      a8135d0d79e9d0ad3a4ff494fceeaae838becf38 (patch)
tree        3aba1f9650cf172838575013e74ce905c67418fc
parent      7bd903c5ca47fde5ad52370a47776491813c772e (diff)
dmaengine: core: Introduce new, universal API to request a channel
These two API functions can cover most, if not all, of the current APIs used to
request a channel. With minimal effort, dmaengine drivers, platforms and
dmaengine client drivers can be converted to use the two functions.
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
Requests any channel matching the given capabilities; it can be used to
request a channel for memcpy, memset, xor, etc., where no hardware
synchronization is needed.
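For example, a pure memory-to-memory user could do something like this
(illustrative sketch only, not part of this patch; the wrapper name is made up):

        static struct dma_chan *foo_request_memcpy_chan(void)
        {
                dma_cap_mask_t mask;

                dma_cap_zero(mask);
                dma_cap_set(DMA_MEMCPY, mask);

                /* Returns a channel pointer or an ERR_PTR() value */
                return dma_request_chan_by_mask(&mask);
        }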
struct dma_chan *dma_request_chan(struct device *dev, const char *name);
Requests a slave channel. dma_request_chan() will try to find the channel via
DT or ACPI; if the kernel booted in non-DT/ACPI mode, it falls back to a filter
lookup table and retrieves the needed information from the dma_slave_map
provided by the DMA driver.
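A typical client probe would then look roughly like this (illustrative sketch;
the "tx" channel name and the deferral handling are assumptions, not part of
this patch):

        struct dma_chan *chan;

        chan = dma_request_chan(&pdev->dev, "tx");
        if (IS_ERR(chan)) {
                /* The DMA provider may not be probed yet */
                if (PTR_ERR(chan) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                chan = NULL;    /* no channel available, handle accordingly */
        }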
This legacy (non-DT/ACPI) mode needs changes in the platform code and in the
dmaengine drivers before the dmaengine client drivers can finally be converted.
For each dmaengine driver, an array of DMA device name, slave channel name and
filter-function parameter needs to be added:
static const struct dma_slave_map da830_edma_map[] = {
{ "davinci-mcasp.0", "rx", EDMA_FILTER_PARAM(0, 0) },
{ "davinci-mcasp.0", "tx", EDMA_FILTER_PARAM(0, 1) },
{ "davinci-mcasp.1", "rx", EDMA_FILTER_PARAM(0, 2) },
{ "davinci-mcasp.1", "tx", EDMA_FILTER_PARAM(0, 3) },
{ "davinci-mcasp.2", "rx", EDMA_FILTER_PARAM(0, 4) },
{ "davinci-mcasp.2", "tx", EDMA_FILTER_PARAM(0, 5) },
{ "spi_davinci.0", "rx", EDMA_FILTER_PARAM(0, 14) },
{ "spi_davinci.0", "tx", EDMA_FILTER_PARAM(0, 15) },
{ "da830-mmc.0", "rx", EDMA_FILTER_PARAM(0, 16) },
{ "da830-mmc.0", "tx", EDMA_FILTER_PARAM(0, 17) },
{ "spi_davinci.1", "rx", EDMA_FILTER_PARAM(0, 18) },
{ "spi_davinci.1", "tx", EDMA_FILTER_PARAM(0, 19) },
};
This information is needed by the dmaengine driver, so the platform_data must
be extended and the slave map added to the pdata of the DMA driver:
da8xx_edma0_pdata.slave_map = da830_edma_map;
da8xx_edma0_pdata.slavecnt = ARRAY_SIZE(da830_edma_map);
The DMA driver then needs to configure the device -> filter_fn mapping before
it registers with dma_async_device_register():
ecc->dma_slave.filter_map.map = info->slave_map;
ecc->dma_slave.filter_map.mapcnt = info->slavecnt;
ecc->dma_slave.filter_map.fn = edma_filter_fn;
When neither DT nor ACPI lookup is available, dma_request_chan() will try to
match the requester's device name against the filter_map's list of device
names; when a match is found, it uses the information from the dma_slave_map
to get the channel with the internal find_candidate() helper.
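For a client driver covered by the map above, the conversion boils down to
replacing the explicit filter-based request with the new call (hypothetical
before/after fragment, not part of this patch; 'mask' and 'chan' are assumed
local variables):

        /* Before: the client had to know the DMA driver's filter function
         * and its parameter (McASP0 TX in the map above):
         */
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        chan = dma_request_channel(mask, edma_filter_fn, EDMA_FILTER_PARAM(0, 1));

        /* After: only the client's own device and channel name are needed;
         * DT, ACPI or the dma_slave_map resolves the rest:
         */
        chan = dma_request_chan(dev, "tx");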
Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--   Documentation/dmaengine/client.txt |  23
-rw-r--r--   drivers/dma/dmaengine.c            |  89
-rw-r--r--   include/linux/dmaengine.h          |  51

3 files changed, 127 insertions, 36 deletions
diff --git a/Documentation/dmaengine/client.txt b/Documentation/dmaengine/client.txt
index 11fb87ff6cd0..4b04d8988708 100644
--- a/Documentation/dmaengine/client.txt
+++ b/Documentation/dmaengine/client.txt
@@ -22,25 +22,14 @@ The slave DMA usage consists of following steps:
 Channel allocation is slightly different in the slave DMA context,
 client drivers typically need a channel from a particular DMA
 controller only and even in some cases a specific channel is desired.
-To request a channel dma_request_channel() API is used.
+To request a channel dma_request_chan() API is used.
 
 Interface:
-        struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
-                        dma_filter_fn filter_fn,
-                        void *filter_param);
-where dma_filter_fn is defined as:
-        typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
-
-The 'filter_fn' parameter is optional, but highly recommended for
-slave and cyclic channels as they typically need to obtain a specific
-DMA channel.
-
-When the optional 'filter_fn' parameter is NULL, dma_request_channel()
-simply returns the first channel that satisfies the capability mask.
-
-Otherwise, the 'filter_fn' routine will be called once for each free
-channel which has a capability in 'mask'. 'filter_fn' is expected to
-return 'true' when the desired DMA channel is found.
+        struct dma_chan *dma_request_chan(struct device *dev, const char *name);
+
+Which will find and return the 'name' DMA channel associated with the 'dev'
+device. The association is done via DT, ACPI or board file based
+dma_slave_map matching table.
 
 A channel allocated via this interface is exclusive to the caller,
 until dma_release_channel() is called.
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 81a36fc445a7..a094dbb54f46 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -43,6 +43,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -665,27 +666,73 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 }
 EXPORT_SYMBOL_GPL(__dma_request_channel);
 
+static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
+                                                    const char *name,
+                                                    struct device *dev)
+{
+        int i;
+
+        if (!device->filter.mapcnt)
+                return NULL;
+
+        for (i = 0; i < device->filter.mapcnt; i++) {
+                const struct dma_slave_map *map = &device->filter.map[i];
+
+                if (!strcmp(map->devname, dev_name(dev)) &&
+                    !strcmp(map->slave, name))
+                        return map;
+        }
+
+        return NULL;
+}
+
 /**
- * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
+ * dma_request_chan - try to allocate an exclusive slave channel
  * @dev: pointer to client device structure
  * @name: slave channel name
  *
  * Returns pointer to appropriate DMA channel on success or an error pointer.
  */
-struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
-                                                  const char *name)
+struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 {
+        struct dma_device *d, *_d;
+        struct dma_chan *chan = NULL;
+
         /* If device-tree is present get slave info from here */
         if (dev->of_node)
-                return of_dma_request_slave_channel(dev->of_node, name);
+                chan = of_dma_request_slave_channel(dev->of_node, name);
 
         /* If device was enumerated by ACPI get slave info from here */
-        if (ACPI_HANDLE(dev))
-                return acpi_dma_request_slave_chan_by_name(dev, name);
+        if (has_acpi_companion(dev) && !chan)
+                chan = acpi_dma_request_slave_chan_by_name(dev, name);
+
+        if (chan) {
+                /* Valid channel found or requester need to be deferred */
+                if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
+                        return chan;
+        }
+
+        /* Try to find the channel via the DMA filter map(s) */
+        mutex_lock(&dma_list_mutex);
+        list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
+                dma_cap_mask_t mask;
+                const struct dma_slave_map *map = dma_filter_match(d, name, dev);
+
+                if (!map)
+                        continue;
+
+                dma_cap_zero(mask);
+                dma_cap_set(DMA_SLAVE, mask);
 
-        return ERR_PTR(-ENODEV);
+                chan = find_candidate(d, &mask, d->filter.fn, map->param);
+                if (!IS_ERR(chan))
+                        break;
+        }
+        mutex_unlock(&dma_list_mutex);
+
+        return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 }
-EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
+EXPORT_SYMBOL_GPL(dma_request_chan);
 
 /**
  * dma_request_slave_channel - try to allocate an exclusive slave channel
@@ -697,17 +744,35 @@ EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
 struct dma_chan *dma_request_slave_channel(struct device *dev,
                                            const char *name)
 {
-        struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
+        struct dma_chan *ch = dma_request_chan(dev, name);
         if (IS_ERR(ch))
                 return NULL;
 
-        dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
-        ch->device->privatecnt++;
-
         return ch;
 }
 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
 
+/**
+ * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
+ * @mask: capabilities that the channel must satisfy
+ *
+ * Returns pointer to appropriate DMA channel on success or an error pointer.
+ */
+struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
+{
+        struct dma_chan *chan;
+
+        if (!mask)
+                return ERR_PTR(-ENODEV);
+
+        chan = __dma_request_channel(mask, NULL, NULL);
+        if (!chan)
+                chan = ERR_PTR(-ENODEV);
+
+        return chan;
+}
+EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
+
 void dma_release_channel(struct dma_chan *chan)
 {
         mutex_lock(&dma_list_mutex);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c47c68e535e8..d50a6b51a73d 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -607,11 +607,38 @@ enum dmaengine_alignment {
 };
 
 /**
+ * struct dma_slave_map - associates slave device and it's slave channel with
+ *                        parameter to be used by a filter function
+ * @devname: name of the device
+ * @slave: slave channel name
+ * @param: opaque parameter to pass to struct dma_filter.fn
+ */
+struct dma_slave_map {
+        const char *devname;
+        const char *slave;
+        void *param;
+};
+
+/**
+ * struct dma_filter - information for slave device/channel to filter_fn/param
+ *                     mapping
+ * @fn: filter function callback
+ * @mapcnt: number of slave device/channel in the map
+ * @map: array of channel to filter mapping data
+ */
+struct dma_filter {
+        dma_filter_fn fn;
+        int mapcnt;
+        const struct dma_slave_map *map;
+};
+
+/**
  * struct dma_device - info on the entity supplying DMA services
  * @chancnt: how many DMA channels are supported
  * @privatecnt: how many DMA channels are requested by dma_request_channel
  * @channels: the list of struct dma_chan
  * @global_node: list_head for global dma_device_list
+ * @filter: information for device/slave to filter function/param mapping
  * @cap_mask: one or more dma_capability flags
  * @max_xor: maximum number of xor sources, 0 if no capability
  * @max_pq: maximum number of PQ sources and PQ-continue capability
@@ -666,6 +693,7 @@ struct dma_device {
         unsigned int privatecnt;
         struct list_head channels;
         struct list_head global_node;
+        struct dma_filter filter;
         dma_cap_mask_t cap_mask;
         unsigned short max_xor;
         unsigned short max_pq;
@@ -1140,9 +1168,11 @@ enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
                                        dma_filter_fn fn, void *fn_param);
-struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
-                                                  const char *name);
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
+
+struct dma_chan *dma_request_chan(struct device *dev, const char *name);
+struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
+
 void dma_release_channel(struct dma_chan *chan);
 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
 #else
@@ -1166,16 +1196,21 @@ static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 {
         return NULL;
 }
-static inline struct dma_chan *dma_request_slave_channel_reason(
-                                        struct device *dev, const char *name)
-{
-        return ERR_PTR(-ENODEV);
-}
 static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
                                                           const char *name)
 {
         return NULL;
 }
+static inline struct dma_chan *dma_request_chan(struct device *dev,
+                                                const char *name)
+{
+        return ERR_PTR(-ENODEV);
+}
+static inline struct dma_chan *dma_request_chan_by_mask(
+                                        const dma_cap_mask_t *mask)
+{
+        return ERR_PTR(-ENODEV);
+}
 static inline void dma_release_channel(struct dma_chan *chan)
 {
 }
@@ -1186,6 +1221,8 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
 }
 #endif
 
+#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
+
 static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
 {
         struct dma_slave_caps caps;