Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--  drivers/dma/dmaengine.c  94
1 file changed, 56 insertions(+), 38 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 5a87384ea4ff..bd0b248de2cf 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -608,6 +608,40 @@ void dmaengine_put(void)
 }
 EXPORT_SYMBOL(dmaengine_put);
 
+static bool device_has_all_tx_types(struct dma_device *device)
+{
+	/* A device that satisfies this test has channels that will never cause
+	 * an async_tx channel switch event as all possible operation types can
+	 * be handled.
+	 */
+	#ifdef CONFIG_ASYNC_TX_DMA
+	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
+		return false;
+	#endif
+
+	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
+	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
+		return false;
+	#endif
+
+	#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
+	if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
+		return false;
+	#endif
+
+	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
+	if (!dma_has_cap(DMA_XOR, device->cap_mask))
+		return false;
+	#endif
+
+	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
+	if (!dma_has_cap(DMA_PQ, device->cap_mask))
+		return false;
+	#endif
+
+	return true;
+}
+
 static int get_dma_id(struct dma_device *device)
 {
 	int rc;
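The hunk above only inspects the device's capability mask, so whether a driver avoids async_tx channel switching depends entirely on which DMA_* bits it sets before registration. As a rough illustration (not part of this patch), a driver that advertises every operation type checked by device_has_all_tx_types() might set its mask like this at probe time; the example_* name is made up, while dma_cap_set() and the DMA_* capability bits are the existing dmaengine helpers used in the hunk.

/* Illustrative only: a probe-time fragment for a hypothetical driver that
 * offers every operation type device_has_all_tx_types() looks for, so the
 * core can treat it as a full async_tx provider.
 */
static void example_advertise_full_caps(struct dma_device *dma)
{
	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);	/* checked when CONFIG_ASYNC_TX_DMA=y */
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma_cap_set(DMA_MEMSET, dma->cap_mask);
	dma_cap_set(DMA_XOR, dma->cap_mask);
	dma_cap_set(DMA_PQ, dma->cap_mask);
}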
@@ -644,8 +678,12 @@ int dma_async_device_register(struct dma_device *device)
 		!device->device_prep_dma_memcpy);
 	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
 		!device->device_prep_dma_xor);
-	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
-		!device->device_prep_dma_zero_sum);
+	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
+		!device->device_prep_dma_xor_val);
+	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
+		!device->device_prep_dma_pq);
+	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
+		!device->device_prep_dma_pq_val);
 	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
 		!device->device_prep_dma_memset);
 	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
@@ -661,6 +699,12 @@ int dma_async_device_register(struct dma_device *device)
 	BUG_ON(!device->device_issue_pending);
 	BUG_ON(!device->dev);
 
+	/* note: this only matters in the
+	 * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case
+	 */
+	if (device_has_all_tx_types(device))
+		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
+
 	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
 	if (!idr_ref)
 		return -ENOMEM;
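With DMA_ASYNC_TX now set automatically at registration for fully capable devices, a client could in principle filter on that bit when requesting a channel. The sketch below is a hedged illustration only: the example_* name is invented, the decision to filter on DMA_ASYNC_TX is not something this patch prescribes, and only dma_cap_zero(), dma_cap_set() and dma_request_channel() are standard dmaengine interfaces.

/* Minimal sketch (not part of the patch): ask for a channel from a device
 * the core has marked DMA_ASYNC_TX, i.e. one able to service every async_tx
 * operation type without a channel switch.
 */
static struct dma_chan *example_get_async_tx_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_ASYNC_TX, mask);

	/* no filter function or parameter needed for this illustration */
	return dma_request_channel(mask, NULL, NULL);
}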
@@ -933,55 +977,29 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 {
 	tx->chan = chan;
 	spin_lock_init(&tx->lock);
-	INIT_LIST_HEAD(&tx->tx_list);
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
 /* dma_wait_for_async_tx - spin wait for a transaction to complete
  * @tx: in-flight transaction to wait on
- *
- * This routine assumes that tx was obtained from a call to async_memcpy,
- * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
- * and submitted). Walking the parent chain is only meant to cover for DMA
- * drivers that do not implement the DMA_INTERRUPT capability and may race with
- * the driver's descriptor cleanup routine.
  */
 enum dma_status
 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
-	enum dma_status status;
-	struct dma_async_tx_descriptor *iter;
-	struct dma_async_tx_descriptor *parent;
+	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 
 	if (!tx)
 		return DMA_SUCCESS;
 
-	WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
-		  " %s\n", __func__, dma_chan_name(tx->chan));
-
-	/* poll through the dependency chain, return when tx is complete */
-	do {
-		iter = tx;
-
-		/* find the root of the unsubmitted dependency chain */
-		do {
-			parent = iter->parent;
-			if (!parent)
-				break;
-			else
-				iter = parent;
-		} while (parent);
-
-		/* there is a small window for ->parent == NULL and
-		 * ->cookie == -EBUSY
-		 */
-		while (iter->cookie == -EBUSY)
-			cpu_relax();
-
-		status = dma_sync_wait(iter->chan, iter->cookie);
-	} while (status == DMA_IN_PROGRESS || (iter != tx));
-
-	return status;
+	while (tx->cookie == -EBUSY) {
+		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
+			pr_err("%s timeout waiting for descriptor submission\n",
+			       __func__);
+			return DMA_ERROR;
+		}
+		cpu_relax();
+	}
+	return dma_sync_wait(tx->chan, tx->cookie);
 }
 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
 
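The rewrite above drops the parent-chain walk and bounds the wait instead: a descriptor whose cookie is still -EBUSY (prepped but not yet submitted) is polled for at most roughly five seconds before the routine gives up with DMA_ERROR, and actual completion is then left to dma_sync_wait(). A hedged caller-side sketch follows; example_sync_on_tx() is invented, and mapping anything other than DMA_SUCCESS to -EIO is just one reasonable policy, not something the patch mandates.

/* Hypothetical caller: after obtaining an in-flight descriptor from one of
 * the async_tx helpers, block until it completes.  With the change above a
 * descriptor stuck at -EBUSY no longer spins forever; the wait times out
 * and surfaces DMA_ERROR instead.
 */
static int example_sync_on_tx(struct dma_async_tx_descriptor *tx)
{
	enum dma_status status = dma_wait_for_async_tx(tx);

	return status == DMA_SUCCESS ? 0 : -EIO;
}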