Diffstat (limited to 'include/linux/dmaengine.h')
 include/linux/dmaengine.h | 36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+), 0 deletions(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 41cf0c399288..ba5f96db0754 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -22,6 +22,7 @@
 #define LINUX_DMAENGINE_H
 
 #include <linux/device.h>
+#include <linux/err.h>
 #include <linux/uio.h>
 #include <linux/bug.h>
 #include <linux/scatterlist.h>
@@ -363,6 +364,32 @@ struct dma_slave_config {
 	unsigned int slave_id;
 };
 
+/**
+ * enum dma_residue_granularity - Granularity of the reported transfer residue
+ * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not supported. The
+ *  DMA channel is only able to tell whether a descriptor has been completed or
+ *  not, so no in-flight progress can be reported for this channel. The
+ *  residue field of struct dma_tx_state will always be 0.
+ * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each successfully
+ *  completed segment of the transfer (for cyclic transfers this is after each
+ *  period). This is typically implemented by having the hardware generate an
+ *  interrupt after each transferred segment and then the driver updates the
+ *  outstanding residue by the size of the segment. Another possibility is that
+ *  the hardware supports scatter-gather and the segment descriptor has a field
+ *  which gets set after the segment has been completed. The driver then counts
+ *  the number of segments without the flag set to compute the residue.
+ * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred
+ *  burst. This is typically only supported if the hardware has a progress
+ *  register of some sort (e.g. a register with the current read/write address
+ *  or a register with the number of bursts/beats/bytes that have been
+ *  transferred or still need to be transferred).
+ */
+enum dma_residue_granularity {
+	DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
+	DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
+	DMA_RESIDUE_GRANULARITY_BURST = 2,
+};
+
 /* struct dma_slave_caps - expose capabilities of a slave channel only
  *
  * @src_addr_widths: bit mask of src addr widths the channel supports
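The SEGMENT granularity above implies per-segment bookkeeping in the driver. As a rough sketch of the interrupt-driven scheme the kernel-doc describes (not part of this patch; the foo_* names and the one-interrupt-per-segment model are illustrative assumptions), a driver might decrement an outstanding-byte counter in its completion handler and later report it from its device_tx_status() callback via the existing dma_set_residue() helper:

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

struct foo_dma_desc {
	struct dma_async_tx_descriptor txd;
	size_t residue;		/* bytes not yet transferred */
	size_t segment_len;	/* bytes per segment (or period) */
};

/* Fires once per completed segment; the outstanding residue shrinks
 * accordingly. A real driver would serialize this against its
 * device_tx_status() implementation. */
static irqreturn_t foo_dma_segment_irq(int irq, void *data)
{
	struct foo_dma_desc *d = data;

	if (d->residue >= d->segment_len)
		d->residue -= d->segment_len;

	return IRQ_HANDLED;
}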
@@ -373,6 +400,7 @@ struct dma_slave_config {
  *	should be checked by controller as well
  * @cmd_pause: true, if pause and thereby resume is supported
  * @cmd_terminate: true, if terminate cmd is supported
+ * @residue_granularity: granularity of the reported transfer residue
  */
 struct dma_slave_caps {
 	u32 src_addr_widths;
@@ -380,6 +408,7 @@ struct dma_slave_caps {
 	u32 directions;
 	bool cmd_pause;
 	bool cmd_terminate;
+	enum dma_residue_granularity residue_granularity;
 };
 
 static inline const char *dma_chan_name(struct dma_chan *chan)
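A client can combine the new field with the existing dma_get_slave_caps() helper to decide how far to trust the reported residue, e.g. whether descriptor-granular reporting is precise enough for its progress queries. Illustrative only; chan and dev are assumed to be in scope:

	struct dma_slave_caps caps;

	if (!dma_get_slave_caps(chan, &caps) &&
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR)
		dev_warn(dev, "DMA channel cannot report in-flight residue\n");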
@@ -1040,6 +1069,8 @@ enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 				dma_filter_fn fn, void *fn_param);
+struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
+						  const char *name);
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
 void dma_release_channel(struct dma_chan *chan);
 #else
@@ -1063,6 +1094,11 @@ static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 {
 	return NULL;
 }
+static inline struct dma_chan *dma_request_slave_channel_reason(
+	struct device *dev, const char *name)
+{
+	return ERR_PTR(-ENODEV);
+}
 static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
 					const char *name)
 {
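The _reason() variant exists so that callers get the failure cause as an ERR_PTR() instead of a bare NULL, which is also why <linux/err.h> is now included at the top of the header. In particular, -EPROBE_DEFER can be propagated when the DMA provider has not probed yet. A sketch of typical probe-time use, assuming pdev is the caller's platform device:

	struct dma_chan *chan;

	chan = dma_request_slave_channel_reason(&pdev->dev, "rx");
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* retry after the provider probes */
		chan = NULL;			/* no channel; fall back to PIO, say */
	}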