diff options
author | Vinod Koul <vinod.koul@intel.com> | 2016-01-06 04:47:47 -0500 |
---|---|---|
committer | Vinod Koul <vinod.koul@intel.com> | 2016-01-06 04:47:47 -0500 |
commit | d3f1e93ce8e00be19711c35f0c67c54a58aea559 (patch) | |
tree | 58010cdfa4fc473fc0693410b4a444755c4438c9 /include/linux/dmaengine.h | |
parent | 7c7b680fa6b0866af2c4876da261bbfe710315d6 (diff) | |
parent | b1d6ab1aa8cdc23b89bcd578ea8d5e3c501a13d9 (diff) |
Merge branch 'topic/async' into for-linus
Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r-- | include/linux/dmaengine.h | 92 |
1 files changed, 92 insertions, 0 deletions
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 8ab3bafc2332..16a1cad30c33 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -681,6 +681,8 @@ struct dma_filter { | |||
681 | * paused. Returns 0 or an error code | 681 | * paused. Returns 0 or an error code |
682 | * @device_terminate_all: Aborts all transfers on a channel. Returns 0 | 682 | * @device_terminate_all: Aborts all transfers on a channel. Returns 0 |
683 | * or an error code | 683 | * or an error code |
684 | * @device_synchronize: Synchronizes the termination of a transfer to the | ||
685 | * current context. | ||
684 | * @device_tx_status: poll for transaction completion, the optional | 686 | * @device_tx_status: poll for transaction completion, the optional |
685 | * txstate parameter can be supplied with a pointer to get a | 687 | * txstate parameter can be supplied with a pointer to get a |
686 | * struct with auxiliary transfer status information, otherwise the call | 688 | * struct with auxiliary transfer status information, otherwise the call |
@@ -767,6 +769,7 @@ struct dma_device { | |||
767 | int (*device_pause)(struct dma_chan *chan); | 769 | int (*device_pause)(struct dma_chan *chan); |
768 | int (*device_resume)(struct dma_chan *chan); | 770 | int (*device_resume)(struct dma_chan *chan); |
769 | int (*device_terminate_all)(struct dma_chan *chan); | 771 | int (*device_terminate_all)(struct dma_chan *chan); |
772 | void (*device_synchronize)(struct dma_chan *chan); | ||
770 | 773 | ||
771 | enum dma_status (*device_tx_status)(struct dma_chan *chan, | 774 | enum dma_status (*device_tx_status)(struct dma_chan *chan, |
772 | dma_cookie_t cookie, | 775 | dma_cookie_t cookie, |
@@ -858,6 +861,13 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( | |||
858 | src_sg, src_nents, flags); | 861 | src_sg, src_nents, flags); |
859 | } | 862 | } |
860 | 863 | ||
864 | /** | ||
865 | * dmaengine_terminate_all() - Terminate all active DMA transfers | ||
866 | * @chan: The channel for which to terminate the transfers | ||
867 | * | ||
868 | * This function is DEPRECATED use either dmaengine_terminate_sync() or | ||
869 | * dmaengine_terminate_async() instead. | ||
870 | */ | ||
861 | static inline int dmaengine_terminate_all(struct dma_chan *chan) | 871 | static inline int dmaengine_terminate_all(struct dma_chan *chan) |
862 | { | 872 | { |
863 | if (chan->device->device_terminate_all) | 873 | if (chan->device->device_terminate_all) |
@@ -866,6 +876,88 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan) | |||
866 | return -ENOSYS; | 876 | return -ENOSYS; |
867 | } | 877 | } |
868 | 878 | ||
879 | /** | ||
880 | * dmaengine_terminate_async() - Terminate all active DMA transfers | ||
881 | * @chan: The channel for which to terminate the transfers | ||
882 | * | ||
883 | * Calling this function will terminate all active and pending descriptors | ||
884 | * that have previously been submitted to the channel. It is not guaranteed | ||
885 | * though that the transfer for the active descriptor has stopped when the | ||
886 | * function returns. Furthermore it is possible the complete callback of a | ||
887 | * submitted transfer is still running when this function returns. | ||
888 | * | ||
889 | * dmaengine_synchronize() needs to be called before it is safe to free | ||
890 | * any memory that is accessed by previously submitted descriptors or before | ||
891 | * freeing any resources accessed from within the completion callback of any | ||
892 | * previously submitted descriptors. | ||
893 | * | ||
894 | * This function can be called from atomic context as well as from within a | ||
895 | * complete callback of a descriptor submitted on the same channel. | ||
896 | * | ||
897 | * If none of the two conditions above apply consider using | ||
898 | * dmaengine_terminate_sync() instead. | ||
899 | */ | ||
900 | static inline int dmaengine_terminate_async(struct dma_chan *chan) | ||
901 | { | ||
902 | if (chan->device->device_terminate_all) | ||
903 | return chan->device->device_terminate_all(chan); | ||
904 | |||
905 | return -EINVAL; | ||
906 | } | ||
907 | |||
908 | /** | ||
909 | * dmaengine_synchronize() - Synchronize DMA channel termination | ||
910 | * @chan: The channel to synchronize | ||
911 | * | ||
912 | * Synchronizes the DMA channel termination to the current context. When this | ||
913 | * function returns it is guaranteed that all transfers for previously issued | ||
914 | * descriptors have stopped and it is safe to free the memory associated | ||
915 | * with them. Furthermore it is guaranteed that all complete callback functions | ||
916 | * for a previously submitted descriptor have finished running and it is safe to | ||
917 | * free resources accessed from within the complete callbacks. | ||
918 | * | ||
919 | * The behavior of this function is undefined if dma_async_issue_pending() has | ||
920 | * been called between dmaengine_terminate_async() and this function. | ||
921 | * | ||
922 | * This function must only be called from non-atomic context and must not be | ||
923 | * called from within a complete callback of a descriptor submitted on the same | ||
924 | * channel. | ||
925 | */ | ||
926 | static inline void dmaengine_synchronize(struct dma_chan *chan) | ||
927 | { | ||
928 | might_sleep(); | ||
929 | |||
930 | if (chan->device->device_synchronize) | ||
931 | chan->device->device_synchronize(chan); | ||
932 | } | ||
933 | |||
934 | /** | ||
935 | * dmaengine_terminate_sync() - Terminate all active DMA transfers | ||
936 | * @chan: The channel for which to terminate the transfers | ||
937 | * | ||
938 | * Calling this function will terminate all active and pending transfers | ||
939 | * that have previously been submitted to the channel. It is similar to | ||
940 | * dmaengine_terminate_async() but guarantees that the DMA transfer has actually | ||
941 | * stopped and that all complete callbacks have finished running when the | ||
942 | * function returns. | ||
943 | * | ||
944 | * This function must only be called from non-atomic context and must not be | ||
945 | * called from within a complete callback of a descriptor submitted on the same | ||
946 | * channel. | ||
947 | */ | ||
948 | static inline int dmaengine_terminate_sync(struct dma_chan *chan) | ||
949 | { | ||
950 | int ret; | ||
951 | |||
952 | ret = dmaengine_terminate_async(chan); | ||
953 | if (ret) | ||
954 | return ret; | ||
955 | |||
956 | dmaengine_synchronize(chan); | ||
957 | |||
958 | return 0; | ||
959 | } | ||
960 | |||
869 | static inline int dmaengine_pause(struct dma_chan *chan) | 961 | static inline int dmaengine_pause(struct dma_chan *chan) |
870 | { | 962 | { |
871 | if (chan->device->device_pause) | 963 | if (chan->device->device_pause) |