diff options
author | Jon Mason <jon.mason@intel.com> | 2013-09-09 19:51:59 -0400 |
---|---|---|
committer | Dan Williams <dan.j.williams@intel.com> | 2013-09-09 20:02:38 -0400 |
commit | 4a43f394a08214eaf92cdd8ce3eae75e555323d8 (patch) | |
tree | d0393349b7823dcf715929bb158c1e5904de056f | |
parent | ab5f8c6ee8af91a8829677f41c3f6afa9c00d48d (diff) |
dmaengine: dma_sync_wait and dma_find_channel undefined
dma_sync_wait and dma_find_channel are declared regardless of whether
CONFIG_DMA_ENGINE is enabled, but calling these functions without
CONFIG_DMA_ENGINE enabled results in "undefined reference" errors.
To get around this, declare dma_sync_wait and dma_find_channel as inline
functions if CONFIG_DMA_ENGINE is undefined.
Signed-off-by: Jon Mason <jon.mason@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r-- | include/linux/dmaengine.h | 12 |
1 file changed, 10 insertions, 2 deletions
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index b3ba7e410943..0c72b89a172c 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -961,8 +961,9 @@ dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, | |||
961 | } | 961 | } |
962 | } | 962 | } |
963 | 963 | ||
964 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); | ||
965 | #ifdef CONFIG_DMA_ENGINE | 964 | #ifdef CONFIG_DMA_ENGINE |
965 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); | ||
966 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); | ||
966 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); | 967 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); |
967 | void dma_issue_pending_all(void); | 968 | void dma_issue_pending_all(void); |
968 | struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, | 969 | struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, |
@@ -970,6 +971,14 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, | |||
970 | struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); | 971 | struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); |
971 | void dma_release_channel(struct dma_chan *chan); | 972 | void dma_release_channel(struct dma_chan *chan); |
972 | #else | 973 | #else |
974 | static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) | ||
975 | { | ||
976 | return NULL; | ||
977 | } | ||
978 | static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) | ||
979 | { | ||
980 | return DMA_SUCCESS; | ||
981 | } | ||
973 | static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | 982 | static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) |
974 | { | 983 | { |
975 | return DMA_SUCCESS; | 984 | return DMA_SUCCESS; |
@@ -997,7 +1006,6 @@ static inline void dma_release_channel(struct dma_chan *chan) | |||
997 | int dma_async_device_register(struct dma_device *device); | 1006 | int dma_async_device_register(struct dma_device *device); |
998 | void dma_async_device_unregister(struct dma_device *device); | 1007 | void dma_async_device_unregister(struct dma_device *device); |
999 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx); | 1008 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx); |
1000 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); | ||
1001 | struct dma_chan *net_dma_find_channel(void); | 1009 | struct dma_chan *net_dma_find_channel(void); |
1002 | #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) | 1010 | #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) |
1003 | #define dma_request_slave_channel_compat(mask, x, y, dev, name) \ | 1011 | #define dma_request_slave_channel_compat(mask, x, y, dev, name) \ |