diff options
| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2013-03-17 22:40:50 -0400 |
|---|---|---|
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2013-03-17 22:40:50 -0400 |
| commit | 688d794c4c3f8b08c814381ee2edd3ede5856056 (patch) | |
| tree | ef680add71e2a9588d07d8b594edbc1b5cd127d7 /include/linux/dmaengine.h | |
| parent | 16142655269aaf580488e074eabfdcf0fb4e3687 (diff) | |
| parent | a937536b868b8369b98967929045f1df54234323 (diff) | |
Merge tag 'v3.9-rc3' into next
Merge with mainline to bring in module_platform_driver_probe() and
devm_ioremap_resource().
Diffstat (limited to 'include/linux/dmaengine.h')
| -rw-r--r-- | include/linux/dmaengine.h | 64 |
1 file changed, 40 insertions, 24 deletions
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index d3201e438d16..91ac8da25020 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
| @@ -608,7 +608,10 @@ static inline int dmaengine_device_control(struct dma_chan *chan, | |||
| 608 | enum dma_ctrl_cmd cmd, | 608 | enum dma_ctrl_cmd cmd, |
| 609 | unsigned long arg) | 609 | unsigned long arg) |
| 610 | { | 610 | { |
| 611 | return chan->device->device_control(chan, cmd, arg); | 611 | if (chan->device->device_control) |
| 612 | return chan->device->device_control(chan, cmd, arg); | ||
| 613 | |||
| 614 | return -ENOSYS; | ||
| 612 | } | 615 | } |
| 613 | 616 | ||
| 614 | static inline int dmaengine_slave_config(struct dma_chan *chan, | 617 | static inline int dmaengine_slave_config(struct dma_chan *chan, |
| @@ -618,6 +621,11 @@ static inline int dmaengine_slave_config(struct dma_chan *chan, | |||
| 618 | (unsigned long)config); | 621 | (unsigned long)config); |
| 619 | } | 622 | } |
| 620 | 623 | ||
| 624 | static inline bool is_slave_direction(enum dma_transfer_direction direction) | ||
| 625 | { | ||
| 626 | return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM); | ||
| 627 | } | ||
| 628 | |||
| 621 | static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( | 629 | static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( |
| 622 | struct dma_chan *chan, dma_addr_t buf, size_t len, | 630 | struct dma_chan *chan, dma_addr_t buf, size_t len, |
| 623 | enum dma_transfer_direction dir, unsigned long flags) | 631 | enum dma_transfer_direction dir, unsigned long flags) |
| @@ -660,6 +668,13 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic( | |||
| 660 | period_len, dir, flags, NULL); | 668 | period_len, dir, flags, NULL); |
| 661 | } | 669 | } |
| 662 | 670 | ||
| 671 | static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( | ||
| 672 | struct dma_chan *chan, struct dma_interleaved_template *xt, | ||
| 673 | unsigned long flags) | ||
| 674 | { | ||
| 675 | return chan->device->device_prep_interleaved_dma(chan, xt, flags); | ||
| 676 | } | ||
| 677 | |||
| 663 | static inline int dmaengine_terminate_all(struct dma_chan *chan) | 678 | static inline int dmaengine_terminate_all(struct dma_chan *chan) |
| 664 | { | 679 | { |
| 665 | return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); | 680 | return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); |
| @@ -849,20 +864,6 @@ static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx) | |||
| 849 | return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; | 864 | return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; |
| 850 | } | 865 | } |
| 851 | 866 | ||
| 852 | #define first_dma_cap(mask) __first_dma_cap(&(mask)) | ||
| 853 | static inline int __first_dma_cap(const dma_cap_mask_t *srcp) | ||
| 854 | { | ||
| 855 | return min_t(int, DMA_TX_TYPE_END, | ||
| 856 | find_first_bit(srcp->bits, DMA_TX_TYPE_END)); | ||
| 857 | } | ||
| 858 | |||
| 859 | #define next_dma_cap(n, mask) __next_dma_cap((n), &(mask)) | ||
| 860 | static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp) | ||
| 861 | { | ||
| 862 | return min_t(int, DMA_TX_TYPE_END, | ||
| 863 | find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1)); | ||
| 864 | } | ||
| 865 | |||
| 866 | #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask)) | 867 | #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask)) |
| 867 | static inline void | 868 | static inline void |
| 868 | __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) | 869 | __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) |
| @@ -891,9 +892,7 @@ __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp) | |||
| 891 | } | 892 | } |
| 892 | 893 | ||
| 893 | #define for_each_dma_cap_mask(cap, mask) \ | 894 | #define for_each_dma_cap_mask(cap, mask) \ |
| 894 | for ((cap) = first_dma_cap(mask); \ | 895 | for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END) |
| 895 | (cap) < DMA_TX_TYPE_END; \ | ||
| 896 | (cap) = next_dma_cap((cap), (mask))) | ||
| 897 | 896 | ||
| 898 | /** | 897 | /** |
| 899 | * dma_async_issue_pending - flush pending transactions to HW | 898 | * dma_async_issue_pending - flush pending transactions to HW |
| @@ -907,8 +906,6 @@ static inline void dma_async_issue_pending(struct dma_chan *chan) | |||
| 907 | chan->device->device_issue_pending(chan); | 906 | chan->device->device_issue_pending(chan); |
| 908 | } | 907 | } |
| 909 | 908 | ||
| 910 | #define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan) | ||
| 911 | |||
| 912 | /** | 909 | /** |
| 913 | * dma_async_is_tx_complete - poll for transaction completion | 910 | * dma_async_is_tx_complete - poll for transaction completion |
| 914 | * @chan: DMA channel | 911 | * @chan: DMA channel |
| @@ -934,16 +931,13 @@ static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan, | |||
| 934 | return status; | 931 | return status; |
| 935 | } | 932 | } |
| 936 | 933 | ||
| 937 | #define dma_async_memcpy_complete(chan, cookie, last, used)\ | ||
| 938 | dma_async_is_tx_complete(chan, cookie, last, used) | ||
| 939 | |||
| 940 | /** | 934 | /** |
| 941 | * dma_async_is_complete - test a cookie against chan state | 935 | * dma_async_is_complete - test a cookie against chan state |
| 942 | * @cookie: transaction identifier to test status of | 936 | * @cookie: transaction identifier to test status of |
| 943 | * @last_complete: last know completed transaction | 937 | * @last_complete: last know completed transaction |
| 944 | * @last_used: last cookie value handed out | 938 | * @last_used: last cookie value handed out |
| 945 | * | 939 | * |
| 946 | * dma_async_is_complete() is used in dma_async_memcpy_complete() | 940 | * dma_async_is_complete() is used in dma_async_is_tx_complete() |
| 947 | * the test logic is separated for lightweight testing of multiple cookies | 941 | * the test logic is separated for lightweight testing of multiple cookies |
| 948 | */ | 942 | */ |
| 949 | static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, | 943 | static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, |
| @@ -974,6 +968,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); | |||
| 974 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); | 968 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); |
| 975 | void dma_issue_pending_all(void); | 969 | void dma_issue_pending_all(void); |
| 976 | struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); | 970 | struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); |
| 971 | struct dma_chan *dma_request_slave_channel(struct device *dev, char *name); | ||
| 977 | void dma_release_channel(struct dma_chan *chan); | 972 | void dma_release_channel(struct dma_chan *chan); |
| 978 | #else | 973 | #else |
| 979 | static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | 974 | static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) |
| @@ -988,6 +983,11 @@ static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, | |||
| 988 | { | 983 | { |
| 989 | return NULL; | 984 | return NULL; |
| 990 | } | 985 | } |
| 986 | static inline struct dma_chan *dma_request_slave_channel(struct device *dev, | ||
| 987 | char *name) | ||
| 988 | { | ||
| 989 | return NULL; | ||
| 990 | } | ||
| 991 | static inline void dma_release_channel(struct dma_chan *chan) | 991 | static inline void dma_release_channel(struct dma_chan *chan) |
| 992 | { | 992 | { |
| 993 | } | 993 | } |
| @@ -1001,6 +1001,22 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx); | |||
| 1001 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); | 1001 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); |
| 1002 | struct dma_chan *net_dma_find_channel(void); | 1002 | struct dma_chan *net_dma_find_channel(void); |
| 1003 | #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) | 1003 | #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) |
| 1004 | #define dma_request_slave_channel_compat(mask, x, y, dev, name) \ | ||
| 1005 | __dma_request_slave_channel_compat(&(mask), x, y, dev, name) | ||
| 1006 | |||
| 1007 | static inline struct dma_chan | ||
| 1008 | *__dma_request_slave_channel_compat(dma_cap_mask_t *mask, dma_filter_fn fn, | ||
| 1009 | void *fn_param, struct device *dev, | ||
| 1010 | char *name) | ||
| 1011 | { | ||
| 1012 | struct dma_chan *chan; | ||
| 1013 | |||
| 1014 | chan = dma_request_slave_channel(dev, name); | ||
| 1015 | if (chan) | ||
| 1016 | return chan; | ||
| 1017 | |||
| 1018 | return __dma_request_channel(mask, fn, fn_param); | ||
| 1019 | } | ||
| 1004 | 1020 | ||
| 1005 | /* --- Helper iov-locking functions --- */ | 1021 | /* --- Helper iov-locking functions --- */ |
| 1006 | 1022 | ||
