author	Dan Williams <dan.j.williams@intel.com>	2009-01-19 17:33:14 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2009-01-19 17:35:54 -0500
commit	c50331e8be32eaba5e1949f98c70d50b891262db (patch)
tree	b748c607329fd03868226ab4fba234a5702368d9 /include/linux/dmaengine.h
parent	83436a0560e9ef8af2f0796264dde4bed1415359 (diff)
dmaengine: dma_issue_pending_all == nop when CONFIG_DMA_ENGINE=n
The device list will always be empty in this configuration, so no need to walk the list.

Reported-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r--	include/linux/dmaengine.h | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 64dea2ab326c..c4a560e72ab7 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -390,11 +390,16 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 #ifdef CONFIG_DMA_ENGINE
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
+void dma_issue_pending_all(void);
 #else
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
 	return DMA_SUCCESS;
 }
+static inline void dma_issue_pending_all(void)
+{
+	do { } while (0);
+}
 #endif
 
 /* --- DMA device --- */
@@ -403,7 +408,6 @@ int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
-void dma_issue_pending_all(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
 struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
 void dma_release_channel(struct dma_chan *chan);
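
For illustration only, a minimal self-contained sketch of the pattern this patch applies (not kernel code; the commented-out CONFIG_DMA_ENGINE define and the main() caller are stand-ins): when the subsystem is compiled out, an empty static inline stub lets callers invoke dma_issue_pending_all() unconditionally, and the compiler discards the call since no DMA devices can ever be registered in that configuration.

	#include <stdio.h>

	/* #define CONFIG_DMA_ENGINE */	/* flip on to use the real declaration */

	#ifdef CONFIG_DMA_ENGINE
	/* Real version is defined elsewhere and walks the registered device list. */
	void dma_issue_pending_all(void);
	#else
	/*
	 * Compiled-out configuration: the device list is necessarily empty,
	 * so an empty inline stub is sufficient and costs nothing at runtime.
	 */
	static inline void dma_issue_pending_all(void)
	{
	}
	#endif

	int main(void)
	{
		/* Call sites stay unconditional; no #ifdef needed per caller. */
		dma_issue_pending_all();
		printf("issue_pending done\n");
		return 0;
	}

The kernel stub above uses do { } while (0) as its body, which is just an explicit empty statement; the effect is the same as the empty braces in this sketch.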