path: root/drivers/dma/dmaengine.c
author    Thierry Reding <treding@nvidia.com>  2013-12-17 12:09:16 -0500
committer Thierry Reding <treding@nvidia.com>  2013-12-17 12:09:16 -0500
commit    b03bb79d4f3677adef410274fe73e6f16a1b3f41 (patch)
tree      5731451c1bead0550a2726bf948b25f7c67dc073 /drivers/dma/dmaengine.c
parent    319e2e3f63c348a9b66db4667efa73178e18b17d (diff)
parent    8a0a1af30cbf56b41220a02e34835022c4d72f41 (diff)
Merge tag 'tegra-for-3.14-dmas-resets-rework' into drm/for-next
ARM: tegra: implement common DMA and resets DT bindings

This series converts the Tegra DTs and drivers to use the common/standard
DMA and reset bindings, rather than custom bindings. It also adds complete
documentation for the Tegra clock bindings without actually changing any
binding definitions.

This conversion relies on a few sets of patches in branches from outside
the Tegra tree:

1) A patch to add a DMA channel request API which allows deferred probe
   to be implemented.
2) A patch to implement a common part of the of_xlate function for DMA
   controllers.
3) Some ASoC patches (which in turn rely on (1) above), which support
   deferred probe during DMA channel allocation.
4) The Tegra clock driver changes for 3.14.

Consequently, this branch is based on a merge of all of those external
branches. In turn, this branch is or will be pulled into a few places
that either rely on features introduced here, or would otherwise conflict
with the patches:

a) Tegra's own for-3.14/powergate and for-3.14/dt branches, to avoid
   conflicts.
b) The DRM tree, which introduces new code that relies on the reset
   controller framework introduced in this branch, and to avoid conflicts.
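The deferred-probe support mentioned in item (1) is what the new
dma_request_slave_channel_reason() in the diff below provides: unlike
dma_request_slave_channel(), it returns an ERR_PTR() instead of NULL on
failure, so a not-yet-probed DMA controller can surface as -EPROBE_DEFER
rather than a hard failure. A minimal sketch of a client driver's probe
path, assuming a hypothetical foo_probe() and an "rx" channel name
(neither is part of this patch):

	static int foo_probe(struct platform_device *pdev)
	{
		struct dma_chan *chan;

		/*
		 * The _reason variant preserves the error code: if the
		 * DMA controller has not probed yet, -EPROBE_DEFER is
		 * propagated and the driver core retries foo_probe()
		 * later, instead of the channel request silently
		 * returning NULL.
		 */
		chan = dma_request_slave_channel_reason(&pdev->dev, "rx");
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		/* ... continue device setup using chan ... */
		return 0;
	}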
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--  drivers/dma/dmaengine.c | 63 ++++++++++++++-
1 file changed, 59 insertions(+), 4 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index ea806bdc12ef..24095ff8a93b 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -535,11 +535,41 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 }
 EXPORT_SYMBOL_GPL(dma_get_slave_channel);
 
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
+{
+	dma_cap_mask_t mask;
+	struct dma_chan *chan;
+	int err;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* lock against __dma_request_channel */
+	mutex_lock(&dma_list_mutex);
+
+	chan = private_candidate(&mask, device, NULL, NULL);
+	if (chan) {
+		err = dma_chan_get(chan);
+		if (err) {
+			pr_debug("%s: failed to get %s: (%d)\n",
+				 __func__, dma_chan_name(chan), err);
+			chan = NULL;
+		}
+	}
+
+	mutex_unlock(&dma_list_mutex);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
+
 /**
  * __dma_request_channel - try to allocate an exclusive channel
  * @mask: capabilities that the channel must satisfy
  * @fn: optional callback to disposition available channels
  * @fn_param: opaque parameter to pass to dma_filter_fn
+ *
+ * Returns pointer to appropriate DMA channel on success or NULL.
  */
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 				       dma_filter_fn fn, void *fn_param)
@@ -591,18 +621,43 @@ EXPORT_SYMBOL_GPL(__dma_request_channel);
  * dma_request_slave_channel - try to allocate an exclusive slave channel
  * @dev: pointer to client device structure
  * @name: slave channel name
+ *
+ * Returns pointer to appropriate DMA channel on success or an error pointer.
  */
-struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name)
+struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
+						  const char *name)
 {
+	struct dma_chan *chan;
+
 	/* If device-tree is present get slave info from here */
 	if (dev->of_node)
 		return of_dma_request_slave_channel(dev->of_node, name);
 
 	/* If device was enumerated by ACPI get slave info from here */
-	if (ACPI_HANDLE(dev))
-		return acpi_dma_request_slave_chan_by_name(dev, name);
+	if (ACPI_HANDLE(dev)) {
+		chan = acpi_dma_request_slave_chan_by_name(dev, name);
+		if (chan)
+			return chan;
+	}
 
-	return NULL;
+	return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
+
+/**
+ * dma_request_slave_channel - try to allocate an exclusive slave channel
+ * @dev: pointer to client device structure
+ * @name: slave channel name
+ *
+ * Returns pointer to appropriate DMA channel on success or NULL.
+ */
+struct dma_chan *dma_request_slave_channel(struct device *dev,
+					   const char *name)
+{
+	struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
+	if (IS_ERR(ch))
+		return NULL;
+	return ch;
 }
 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
 
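The dma_get_any_slave_channel() helper added above is intended for DMA
controller drivers whose DT binding does not name a specific channel:
their of_xlate callback can simply hand out any free DMA_SLAVE-capable
channel on the controller. A hedged sketch of such a callback, with
hypothetical foo_* names (the pattern follows the mmp_pdma of_xlate logic
this helper was factored out of):

	struct foo_dmadev {
		struct dma_device ddev;
		/* ... controller-private state ... */
	};

	static struct dma_chan *foo_of_xlate(struct of_phandle_args *dma_spec,
					     struct of_dma *ofdma)
	{
		struct foo_dmadev *fd = ofdma->of_dma_data;

		/*
		 * Any free slave channel on this controller will do; the
		 * helper takes dma_list_mutex itself, so no additional
		 * locking is needed here.
		 */
		return dma_get_any_slave_channel(&fd->ddev);
	}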