author     Jon Mason <jon.mason@intel.com>          2013-09-09 19:51:59 -0400
committer  Dan Williams <dan.j.williams@intel.com>  2013-09-09 20:02:38 -0400
commit     4a43f394a08214eaf92cdd8ce3eae75e555323d8 (patch)
tree       d0393349b7823dcf715929bb158c1e5904de056f
parent     ab5f8c6ee8af91a8829677f41c3f6afa9c00d48d (diff)
dmaengine: dma_sync_wait and dma_find_channel undefined
dma_sync_wait and dma_find_channel are declared regardless of whether CONFIG_DMA_ENGINE is enabled, but calling either function without CONFIG_DMA_ENGINE enabled results in "undefined reference" errors. To get around this, declare dma_sync_wait and dma_find_channel as inline functions when CONFIG_DMA_ENGINE is undefined.

Signed-off-by: Jon Mason <jon.mason@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--  include/linux/dmaengine.h  12
1 file changed, 10 insertions, 2 deletions
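The fix follows the usual kernel header pattern for optional subsystems. As a minimal standalone sketch of that pattern (hypothetical CONFIG_FOO / foo_* names, not the real dmaengine API; the actual dmaengine diff follows below): declare the real functions only when the subsystem is built, and provide static inline stubs otherwise so every caller still links.

/* Hypothetical sketch of the stub pattern, not kernel code. */
#ifdef CONFIG_FOO
struct foo_chan *foo_find_channel(int type);
int foo_sync_wait(struct foo_chan *chan, int cookie);
#else
static inline struct foo_chan *foo_find_channel(int type)
{
	return NULL;	/* no channels when the subsystem is disabled */
}
static inline int foo_sync_wait(struct foo_chan *chan, int cookie)
{
	return 0;	/* nothing in flight; report completion */
}
#endif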
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index b3ba7e410943..0c72b89a172c 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -961,8 +961,9 @@ dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used,
 	}
 }
 
-enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 #ifdef CONFIG_DMA_ENGINE
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
@@ -970,6 +971,14 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
 void dma_release_channel(struct dma_chan *chan);
 #else
+static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
+{
+	return NULL;
+}
+static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
+{
+	return DMA_SUCCESS;
+}
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
 	return DMA_SUCCESS;
@@ -997,7 +1006,6 @@ static inline void dma_release_channel(struct dma_chan *chan)
 int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
-struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
 struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
 #define dma_request_slave_channel_compat(mask, x, y, dev, name) \
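For context, a minimal caller sketch (a hypothetical example_memcpy_wait() helper, not part of the patch): with the declarations moved under CONFIG_DMA_ENGINE and the stubs added above, code like this compiles and links whether or not CONFIG_DMA_ENGINE is set, taking the NULL / DMA_SUCCESS stub paths when it is not.

#include <linux/errno.h>
#include <linux/dmaengine.h>

/* Hypothetical helper: wait for a memcpy transfer if a DMA engine is
 * available; with CONFIG_DMA_ENGINE=n the inline stubs make
 * dma_find_channel() return NULL and dma_sync_wait() return DMA_SUCCESS. */
static int example_memcpy_wait(dma_cookie_t cookie)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);

	if (!chan)
		return -ENODEV;	/* stub path: no channels without DMA_ENGINE */

	return dma_sync_wait(chan, cookie) == DMA_SUCCESS ? 0 : -EIO;
}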