about summary refs log tree commit diff stats
path: root/drivers/dma
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2009-07-14 15:19:02 -0400
committerDan Williams <dan.j.williams@intel.com>2009-08-29 22:09:27 -0400
commit95475e57113c66aac7583925736ed2e2d58c990d (patch)
tree933aa0ca3bffef5b1457c516fbe3e8690b4c4cb1 /drivers/dma
parentaf1f951eb6ef27b01cbfb3f6c21b770af4368a6d (diff)
async_tx: remove walk of tx->parent chain in dma_wait_for_async_tx
We currently walk the parent chain when waiting for a given tx to complete however this walk may race with the driver cleanup routine. The routines in async_raid6_recov.c may fall back to the synchronous path at any point so we need to be prepared to call async_tx_quiesce() (which calls dma_wait_for_async_tx). To remove the ->parent walk we guarantee that every time a dependency is attached ->issue_pending() is invoked, then we can simply poll the initial descriptor until completion. This also allows for a lighter weight 'issue pending' implementation as there is no longer a requirement to iterate through all the channels' ->issue_pending() routines as long as operations have been submitted in an ordered chain. async_tx_issue_pending() is added for this case. Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/dmaengine.c45
1 file changed, 10 insertions(+), 35 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 6781e8f3c064..e002e0e0d055 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -934,49 +934,24 @@ EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
 /* dma_wait_for_async_tx - spin wait for a transaction to complete
  * @tx: in-flight transaction to wait on
- *
- * This routine assumes that tx was obtained from a call to async_memcpy,
- * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
- * and submitted). Walking the parent chain is only meant to cover for DMA
- * drivers that do not implement the DMA_INTERRUPT capability and may race with
- * the driver's descriptor cleanup routine.
  */
 enum dma_status
 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
-	enum dma_status status;
-	struct dma_async_tx_descriptor *iter;
-	struct dma_async_tx_descriptor *parent;
+	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 
 	if (!tx)
 		return DMA_SUCCESS;
 
-	WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
-		  " %s\n", __func__, dma_chan_name(tx->chan));
-
-	/* poll through the dependency chain, return when tx is complete */
-	do {
-		iter = tx;
-
-		/* find the root of the unsubmitted dependency chain */
-		do {
-			parent = iter->parent;
-			if (!parent)
-				break;
-			else
-				iter = parent;
-		} while (parent);
-
-		/* there is a small window for ->parent == NULL and
-		 * ->cookie == -EBUSY
-		 */
-		while (iter->cookie == -EBUSY)
-			cpu_relax();
-
-		status = dma_sync_wait(iter->chan, iter->cookie);
-	} while (status == DMA_IN_PROGRESS || (iter != tx));
-
-	return status;
+	while (tx->cookie == -EBUSY) {
+		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
+			pr_err("%s timeout waiting for descriptor submission\n",
+			       __func__);
+			return DMA_ERROR;
+		}
+		cpu_relax();
+	}
+	return dma_sync_wait(tx->chan, tx->cookie);
 }
 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
 