path: root/include/linux/dmaengine.h
author     Guennadi Liakhovetski <g.liakhovetski@gmx.de>  2010-12-22 08:46:46 -0500
committer  Dan Williams <dan.j.williams@intel.com>        2011-01-03 04:41:40 -0500
commit     8f33d5277fada0291ea495f7fd44a3e7b7aa41d3 (patch)
tree       f94ab5a72d00cd7fb7c061ed68f1d8e5e1f85cde /include/linux/dmaengine.h
parent     8333f65ef094e47020cd01452b4637e7daf5a77f (diff)
dmaengine: provide dummy functions for DMA_ENGINE=n
This lets drivers that optionally use the dmaengine build with DMA_ENGINE unselected.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
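For context, here is a minimal sketch (not part of this patch) of how such a driver benefits. With CONFIG_DMA_ENGINE=n, dma_request_channel() expands to the new dummy __dma_request_channel(), which returns NULL, so the code below still compiles and links and the driver can simply fall back to PIO. The filter callback, the helper name, and the DMA_SLAVE capability are illustrative assumptions, not taken from the patch.

#include <linux/dmaengine.h>

/* Hypothetical filter: accept any channel offered (illustration only). */
static bool example_filter(struct dma_chan *chan, void *param)
{
	return true;
}

static struct dma_chan *example_request_dma(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * With DMA_ENGINE=n this resolves to the dummy
	 * __dma_request_channel() and returns NULL; the caller then
	 * falls back to PIO instead of failing at build time.
	 */
	return dma_request_channel(mask, example_filter, NULL);
}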
Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r--  include/linux/dmaengine.h | 13
1 file changed, 10 insertions, 3 deletions
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 9d8688b92d8b..8cd00ad98d37 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -824,6 +824,8 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 #ifdef CONFIG_DMA_ENGINE
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
+struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
+void dma_release_channel(struct dma_chan *chan);
 #else
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
@@ -831,7 +833,14 @@ static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descript
 }
 static inline void dma_issue_pending_all(void)
 {
-	do { } while (0);
+}
+static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
+						      dma_filter_fn fn, void *fn_param)
+{
+	return NULL;
+}
+static inline void dma_release_channel(struct dma_chan *chan)
+{
 }
 #endif
 
@@ -842,8 +851,6 @@ void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
-struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
-void dma_release_channel(struct dma_chan *chan);
 
 /* --- Helper iov-locking functions --- */
 