path: root/include/linux/dmaengine.h
Diffstat (limited to 'include/linux/dmaengine.h')
 include/linux/dmaengine.h | 48 ++++++++++++++++++++++------------------------
 1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index d3201e438d16..f5939999cb65 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -608,7 +608,10 @@ static inline int dmaengine_device_control(struct dma_chan *chan,
                                            enum dma_ctrl_cmd cmd,
                                            unsigned long arg)
 {
-        return chan->device->device_control(chan, cmd, arg);
+        if (chan->device->device_control)
+                return chan->device->device_control(chan, cmd, arg);
+
+        return -ENOSYS;
 }
 
 static inline int dmaengine_slave_config(struct dma_chan *chan,
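
With this change, calling the wrapper on a driver that does not implement .device_control returns -ENOSYS instead of dereferencing a NULL function pointer. A minimal sketch of a caller coping with that result (the helper name and warning are illustrative, not part of the patch):

/* Illustrative only: terminate a channel, tolerating drivers
 * that provide no .device_control op (now reported as -ENOSYS). */
static int stop_channel(struct dma_chan *chan)
{
        int ret = dmaengine_terminate_all(chan);

        if (ret == -ENOSYS)
                pr_warn("%s: no device_control op, nothing terminated\n",
                        dma_chan_name(chan));
        return ret;
}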
@@ -618,6 +621,11 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
                                          (unsigned long)config);
 }
 
+static inline bool is_slave_direction(enum dma_transfer_direction direction)
+{
+        return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
+}
+
 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
         struct dma_chan *chan, dma_addr_t buf, size_t len,
         enum dma_transfer_direction dir, unsigned long flags)
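
The new is_slave_direction() helper gives drivers one test for the two slave directions. A hedged sketch of a prep callback using it (the function name and early-reject policy are hypothetical):

/* Hypothetical driver prep callback: reject non-slave directions
 * up front with the new helper. */
static struct dma_async_tx_descriptor *foo_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction dir,
                unsigned long flags, void *context)
{
        if (!is_slave_direction(dir))
                return NULL;    /* only MEM_TO_DEV / DEV_TO_MEM supported */
        /* ... build and return the descriptor ... */
        return NULL;
}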
@@ -660,6 +668,13 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
                                                   period_len, dir, flags, NULL);
 }
 
+static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
+                struct dma_chan *chan, struct dma_interleaved_template *xt,
+                unsigned long flags)
+{
+        return chan->device->device_prep_interleaved_dma(chan, xt, flags);
+}
+
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
 {
         return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
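
dmaengine_prep_interleaved_dma() mirrors the other prep wrappers. Note that, as added, it does not NULL-check the op, so it is only safe on channels that advertise interleaved support. A rough usage sketch, assuming such a channel and made-up src/dst/len values; the template ends in a flexible array, hence the over-allocation:

/* Illustrative: one frame made of a single chunk, memory to device. */
struct dma_interleaved_template *xt;
struct dma_async_tx_descriptor *desc;

xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
if (!xt)
        return -ENOMEM;
xt->src_start  = src;           /* assumed dma_addr_t values */
xt->dst_start  = dst;
xt->dir        = DMA_MEM_TO_DEV;
xt->src_inc    = true;
xt->dst_inc    = true;
xt->numf       = 1;             /* one frame ... */
xt->frame_size = 1;             /* ... of a single chunk */
xt->sgl[0].size = len;          /* chunk payload in bytes */
xt->sgl[0].icg  = 0;            /* no inter-chunk gap */

desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);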
@@ -849,20 +864,6 @@ static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
         return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
 }
 
-#define first_dma_cap(mask) __first_dma_cap(&(mask))
-static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
-{
-        return min_t(int, DMA_TX_TYPE_END,
-                find_first_bit(srcp->bits, DMA_TX_TYPE_END));
-}
-
-#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
-static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
-{
-        return min_t(int, DMA_TX_TYPE_END,
-                find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
-}
-
 #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
 static inline void
 __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
@@ -891,9 +892,7 @@ __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
 }
 
 #define for_each_dma_cap_mask(cap, mask) \
-        for ((cap) = first_dma_cap(mask);       \
-                (cap) < DMA_TX_TYPE_END;        \
-                (cap) = next_dma_cap((cap), (mask)))
+        for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
 
 /**
  * dma_async_issue_pending - flush pending transactions to HW
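
Behaviour of for_each_dma_cap_mask() is unchanged; it now rides on the generic for_each_set_bit() iterator instead of the removed first_dma_cap()/next_dma_cap() pair. A sketch of the (unchanged) caller-side idiom, with an arbitrary mask for illustration:

/* Illustrative: walk every capability bit set in a mask. */
dma_cap_mask_t mask;
enum dma_transaction_type cap;

dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
dma_cap_set(DMA_SLAVE, mask);

for_each_dma_cap_mask(cap, mask)
        pr_info("capability %d set\n", cap);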
@@ -907,8 +906,6 @@ static inline void dma_async_issue_pending(struct dma_chan *chan)
         chan->device->device_issue_pending(chan);
 }
 
-#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)
-
 /**
  * dma_async_is_tx_complete - poll for transaction completion
  * @chan: DMA channel
@@ -934,16 +931,13 @@ static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
         return status;
 }
 
-#define dma_async_memcpy_complete(chan, cookie, last, used)\
-        dma_async_is_tx_complete(chan, cookie, last, used)
-
 /**
  * dma_async_is_complete - test a cookie against chan state
  * @cookie: transaction identifier to test status of
  * @last_complete: last know completed transaction
  * @last_used: last cookie value handed out
  *
- * dma_async_is_complete() is used in dma_async_memcpy_complete()
+ * dma_async_is_complete() is used in dma_async_is_tx_complete()
  * the test logic is separated for lightweight testing of multiple cookies
  */
 static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
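
With the dma_async_memcpy_complete() alias gone, clients poll through dma_async_is_tx_complete() directly. A minimal polling sketch; the busy-wait is only for illustration, and DMA_SUCCESS is the completion status name in this era of the API:

/* Illustrative: poll a cookie until the transaction completes. */
enum dma_status status;
dma_cookie_t last, used;

do {
        status = dma_async_is_tx_complete(chan, cookie, &last, &used);
} while (status == DMA_IN_PROGRESS);

if (status != DMA_SUCCESS)
        pr_err("DMA transaction did not complete successfully\n");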
@@ -974,6 +968,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
+struct dma_chan *dma_request_slave_channel(struct device *dev, char *name);
 void dma_release_channel(struct dma_chan *chan);
 #else
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
@@ -988,6 +983,11 @@ static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
 {
         return NULL;
 }
+static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
+                                                         char *name)
+{
+        return NULL;
+}
 static inline void dma_release_channel(struct dma_chan *chan)
 {
 }
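
The new dma_request_slave_channel() entry point lets a slave driver ask for a channel by device and channel name rather than by capability mask and filter function, and the !CONFIG_DMA_ENGINE stub above keeps such callers compiling. A hedged probe-path sketch (driver, channel name, and error policy are made up):

/* Illustrative: request this device's "rx" channel by name. */
static int foo_probe(struct platform_device *pdev)
{
        struct dma_chan *chan;

        chan = dma_request_slave_channel(&pdev->dev, "rx");
        if (!chan)
                return -ENODEV; /* or fall back to PIO, driver's choice */

        /* ... dmaengine_slave_config(), prep, submit ... */
        dma_release_channel(chan);
        return 0;
}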