path: root/include/linux/dmaengine.h
Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r--  include/linux/dmaengine.h  99
1 file changed, 92 insertions(+), 7 deletions(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 75f53f874b24..679b349d9b66 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -23,7 +23,6 @@
 
 #include <linux/device.h>
 #include <linux/uio.h>
-#include <linux/dma-direction.h>
 #include <linux/scatterlist.h>
 #include <linux/bitmap.h>
 #include <asm/page.h>
@@ -72,11 +71,93 @@ enum dma_transaction_type {
         DMA_ASYNC_TX,
         DMA_SLAVE,
         DMA_CYCLIC,
+        DMA_INTERLEAVE,
+/* last transaction type for creation of the capabilities mask */
+        DMA_TX_TYPE_END,
 };
 
-/* last transaction type for creation of the capabilities mask */
-#define DMA_TX_TYPE_END (DMA_CYCLIC + 1)
+/**
+ * enum dma_transfer_direction - dma transfer mode and direction indicator
+ * @DMA_MEM_TO_MEM: Async/Memcpy mode
+ * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
+ * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
+ * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
+ */
+enum dma_transfer_direction {
+        DMA_MEM_TO_MEM,
+        DMA_MEM_TO_DEV,
+        DMA_DEV_TO_MEM,
+        DMA_DEV_TO_DEV,
+        DMA_TRANS_NONE,
+};
+
+/**
+ * Interleaved Transfer Request
+ * ----------------------------
+ * A chunk is a collection of contiguous bytes to be transferred.
+ * The gap (in bytes) between two chunks is called the inter-chunk gap (ICG).
+ * ICGs may or may not change between chunks.
+ * A FRAME is the smallest series of contiguous {chunk,icg} pairs that,
+ * when repeated an integral number of times, specifies the transfer.
+ * A transfer template is the specification of a Frame, the number of times
+ * it is to be repeated and other per-transfer attributes.
+ *
+ * Practically, a client driver would have a template ready for each
+ * type of transfer it is going to need during its lifetime and
+ * set only 'src_start' and 'dst_start' before submitting the requests.
+ *
+ *
+ * |      Frame-1        |       Frame-2       | ~ |       Frame-'numf'  |
+ * |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
+ *
+ * ==  Chunk size
+ * ... ICG
+ */
+
+/**
+ * struct data_chunk - Element of scatter-gather list that makes a frame.
+ * @size: Number of bytes to read from source.
+ *        size_dst := fn(op, size_src), so it doesn't mean much for destination.
+ * @icg: Number of bytes to jump after the last src/dst address of this
+ *       chunk and before the first src/dst address of the next chunk.
+ *       Ignored for dst (assumed 0), if dst_inc is true and dst_sgl is false.
+ *       Ignored for src (assumed 0), if src_inc is true and src_sgl is false.
+ */
+struct data_chunk {
+        size_t size;
+        size_t icg;
+};
 
+/**
+ * struct dma_interleaved_template - Template to convey to the DMAC the
+ *       transfer pattern and attributes.
+ * @src_start: Bus address of source for the first chunk.
+ * @dst_start: Bus address of destination for the first chunk.
+ * @dir: Specifies the type of Source and Destination.
+ * @src_inc: If the source address increments after reading from it.
+ * @dst_inc: If the destination address increments after writing to it.
+ * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
+ *           Otherwise, source is read contiguously (icg ignored).
+ *           Ignored if src_inc is false.
+ * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
+ *           Otherwise, destination is filled contiguously (icg ignored).
+ *           Ignored if dst_inc is false.
+ * @numf: Number of frames in this template.
+ * @frame_size: Number of chunks in a frame, i.e., the size of sgl[].
+ * @sgl: Array of {chunk,icg} pairs that make up a frame.
+ */
+struct dma_interleaved_template {
+        dma_addr_t src_start;
+        dma_addr_t dst_start;
+        enum dma_transfer_direction dir;
+        bool src_inc;
+        bool dst_inc;
+        bool src_sgl;
+        bool dst_sgl;
+        size_t numf;
+        size_t frame_size;
+        struct data_chunk sgl[0];
+};
 
 /**
  * enum dma_ctrl_flags - DMA flags to augment operation preparation,
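[Editor's note] To make the new template concrete, here is a hedged sketch, not part of the patch, of how a client driver might describe a de-interleaving copy: 'numf' lines of 'size' bytes, spaced 'stride' bytes apart in the source, packed contiguously at the destination. The helper name, its parameters and the one-chunk-per-frame layout are illustrative assumptions.

#include <linux/dmaengine.h>
#include <linux/slab.h>

/*
 * Hypothetical helper: build a one-chunk-per-frame template that copies
 * 'numf' lines of 'size' bytes, spaced 'stride' bytes apart at the source,
 * into a packed destination buffer (DMA_MEM_TO_MEM).
 */
static struct dma_interleaved_template *
build_deinterleave_template(dma_addr_t src, dma_addr_t dst,
                            size_t size, size_t stride, size_t numf)
{
        struct dma_interleaved_template *xt;

        /* sgl[] is a flexible array; allocate room for one data_chunk */
        xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
        if (!xt)
                return NULL;

        xt->src_start = src;            /* bus address of the first source line */
        xt->dst_start = dst;            /* bus address of the packed buffer */
        xt->dir = DMA_MEM_TO_MEM;
        xt->src_inc = true;
        xt->src_sgl = true;             /* honour icg while reading the source */
        xt->dst_inc = true;
        xt->dst_sgl = false;            /* destination is filled contiguously */
        xt->numf = numf;                /* one frame per line */
        xt->frame_size = 1;             /* a frame is a single {chunk,icg} pair */
        xt->sgl[0].size = size;         /* bytes to copy per line */
        xt->sgl[0].icg = stride - size; /* gap to skip to the next source line */

        return xt;
}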
@@ -269,7 +350,7 @@ enum dma_slave_buswidth {
  *      struct, if applicable.
  */
 struct dma_slave_config {
-        enum dma_data_direction direction;
+        enum dma_transfer_direction direction;
         dma_addr_t src_addr;
         dma_addr_t dst_addr;
         enum dma_slave_buswidth src_addr_width;
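[Editor's note] For context, a hedged sketch of a slave client adapting to the retyped field: the channel, FIFO address, bus width and burst size below are placeholder assumptions, not values from the patch.

#include <linux/dmaengine.h>

/* Hypothetical example: configure a channel for memory-to-device writes
 * using the new enum dma_transfer_direction in dma_slave_config. */
static int cfg_tx_channel(struct dma_chan *chan, dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_MEM_TO_DEV,  /* previously DMA_TO_DEVICE */
                .dst_addr       = fifo_addr,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 4,
        };

        return dmaengine_slave_config(chan, &cfg);
}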
@@ -433,6 +514,7 @@ struct dma_tx_state {
  * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
  *      The function takes a buffer of size buf_len. The callback function will
  *      be called after period_len bytes have been transferred.
+ * @device_prep_interleaved_dma: Transfer expression in a generic way.
  * @device_control: manipulate all pending operations on a channel, returns
  *      zero or error code
  * @device_tx_status: poll for transaction completion, the optional
@@ -492,11 +574,14 @@ struct dma_device {
 
         struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
                 struct dma_chan *chan, struct scatterlist *sgl,
-                unsigned int sg_len, enum dma_data_direction direction,
+                unsigned int sg_len, enum dma_transfer_direction direction,
                 unsigned long flags);
         struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
                 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
-                size_t period_len, enum dma_data_direction direction);
+                size_t period_len, enum dma_transfer_direction direction);
+        struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
+                struct dma_chan *chan, struct dma_interleaved_template *xt,
+                unsigned long flags);
         int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                 unsigned long arg);
 
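[Editor's note] The patch adds the provider hook but no client-side wrapper, so a hedged sketch of submission, assuming a channel advertising DMA_INTERLEAVE and a template such as the one built above, would go through the device ops directly; the function name and error codes are illustrative.

#include <linux/dmaengine.h>

/* Hypothetical example: hand an interleaved template to a channel whose
 * driver implements the new device_prep_interleaved_dma callback. */
static int submit_interleaved(struct dma_chan *chan,
                              struct dma_interleaved_template *xt)
{
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        if (!chan->device->device_prep_interleaved_dma)
                return -ENOSYS;

        desc = chan->device->device_prep_interleaved_dma(chan, xt,
                                                         DMA_PREP_INTERRUPT);
        if (!desc)
                return -ENOMEM;

        cookie = dmaengine_submit(desc);
        if (dma_submit_error(cookie))
                return -EIO;

        dma_async_issue_pending(chan);  /* kick off the queued transfer */

        return 0;
}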
@@ -522,7 +607,7 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
 
 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
         struct dma_chan *chan, void *buf, size_t len,
-        enum dma_data_direction dir, unsigned long flags)
+        enum dma_transfer_direction dir, unsigned long flags)
 {
         struct scatterlist sg;
         sg_init_one(&sg, buf, len);
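[Editor's note] Finally, a hedged example of the retyped convenience wrapper in use; the channel, buffer and length are assumed to come from the caller, and the flags are illustrative.

#include <linux/dmaengine.h>

/* Hypothetical example: prepare a single-buffer slave transfer, now passing
 * enum dma_transfer_direction instead of enum dma_data_direction. */
static struct dma_async_tx_descriptor *
prep_tx_buffer(struct dma_chan *chan, void *buf, size_t len)
{
        return dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}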