author		Jassi Brar <jaswinder.singh@linaro.org>		2011-10-13 03:03:30 -0400
committer	Vinod Koul <vinod.koul@linux.intel.com>	2011-11-18 01:46:24 -0500
commit		b14dab792dee3245b628e046d80a7fad5573fea6 (patch)
tree		c77574d19be7c150b57bd9a9b0184580c733621b
parent		e0d23ef29ed637dc6bd739f590985746d9ad9caa (diff)
DMAEngine: Define interleaved transfer request api
Define a new api that could be used for doing fancy data transfers like
interleaved to contiguous copy and vice-versa. Traditional SG_list based
transfers tend to be very inefficient in such cases, as when the interleave
and chunk are only a few bytes, which calls for a very condensed api to
convey the pattern of the transfer. This api supports all 4 variants of
scatter-gather and contiguous transfer.

Of course, this api cannot help transfers that don't lend themselves to DMA
by nature, i.e., scattered tiny reads/writes with no periodic pattern.

Also, since we now support SLAVE channels that might not provide the
device_prep_slave_sg callback but only device_prep_interleaved_dma, remove
the BUG_ON check.

Signed-off-by: Jassi Brar <jaswinder.singh@linaro.org>
Acked-by: Barry Song <Baohua.Song@csr.com>
[renamed dmaxfer_template to dma_interleaved_template
 did fixup after the enum dma_transfer_merge]
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
-rw-r--r--	Documentation/dmaengine.txt	8
-rw-r--r--	drivers/dma/dmaengine.c		4
-rw-r--r--	include/linux/dmaengine.h	78
3 files changed, 85 insertions(+), 5 deletions(-)
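For context, a minimal client-side sketch of the new api as it could look on
top of this patch: a mem-to-mem de-interleave that packs every other 32-bit
word of a source buffer. All function and variable names are illustrative
(not part of the patch), and whether the template may be freed right after
the prep call is provider-specific.

#include <linux/dmaengine.h>
#include <linux/slab.h>

/* Pack every other 32-bit word of 'src' into a contiguous 'dst'. */
static struct dma_async_tx_descriptor *
prep_deinterleave(struct dma_chan *chan, dma_addr_t src, dma_addr_t dst,
		  size_t words)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_interleaved_template *xt;

	/* One chunk per frame, so sgl[] needs a single entry. */
	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->dir = DMA_MEM_TO_MEM;
	xt->src_inc = true;
	xt->src_sgl = true;	/* scattered read: honour icg on the source */
	xt->dst_inc = true;
	xt->dst_sgl = false;	/* contiguous write: icg ignored for dst */
	xt->numf = words;	/* the {4 bytes, skip 4} frame repeats per word */
	xt->frame_size = 1;
	xt->sgl[0].size = 4;	/* copy one 32-bit word... */
	xt->sgl[0].icg = 4;	/* ...then jump over the next one */

	desc = chan->device->device_prep_interleaved_dma(chan, xt,
							 DMA_PREP_INTERRUPT);
	kfree(xt);	/* assumes the provider consumed the template in prep */
	return desc;
}

The returned descriptor is then used like any other: set desc->callback if
needed, submit with desc->tx_submit(desc), and kick the channel with
dma_async_issue_pending(chan).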
diff --git a/Documentation/dmaengine.txt b/Documentation/dmaengine.txt
index 94b7e0f96b38..bbe6cb3d1856 100644
--- a/Documentation/dmaengine.txt
+++ b/Documentation/dmaengine.txt
@@ -75,6 +75,10 @@ The slave DMA usage consists of following steps:
 	slave_sg	- DMA a list of scatter gather buffers from/to a peripheral
 	dma_cyclic	- Perform a cyclic DMA operation from/to a peripheral till the
 			operation is explicitly stopped.
+	interleaved_dma - This is common to Slave as well as M2M clients. For slave
+			address of devices' fifo could be already known to the driver.
+			Various types of operations could be expressed by setting
+			appropriate values to the 'dma_interleaved_template' members.
 
    A non-NULL return of this transfer API represents a "descriptor" for
    the given transaction.
@@ -89,6 +93,10 @@ The slave DMA usage consists of following steps:
 		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 		size_t period_len, enum dma_data_direction direction);
 
+	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
+		struct dma_chan *chan, struct dma_interleaved_template *xt,
+		unsigned long flags);
+
 	The peripheral driver is expected to have mapped the scatterlist for
 	the DMA operation prior to calling device_prep_slave_sg, and must
 	keep the scatterlist mapped until the DMA operation has completed.
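To connect the documentation text in the hunk above with the template
members, a hedged sketch of the slave case it describes: the device's fifo
address is already known to the DMAC driver, so the client only shapes the
memory side. The function name is hypothetical, and 'xt' is assumed to be
caller-allocated with room for one data_chunk in sgl[].

#include <linux/dmaengine.h>

/* Capture 'periods' bursts of 'burst' bytes from a device fifo into
 * memory at 'buf', leaving 'skip' bytes untouched between bursts.
 */
static void fifo_capture_template(struct dma_interleaved_template *xt,
				  dma_addr_t buf, size_t burst,
				  size_t skip, size_t periods)
{
	xt->dst_start = buf;
	xt->dir = DMA_DEV_TO_MEM;	/* fifo address known to the driver */
	xt->src_inc = false;		/* fixed source: the fifo */
	xt->src_sgl = false;
	xt->dst_inc = true;
	xt->dst_sgl = true;		/* scattered write: honour icg */
	xt->numf = periods;
	xt->frame_size = 1;
	xt->sgl[0].size = burst;
	xt->sgl[0].icg = skip;
}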
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index b48967b499da..a6c6051ec858 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -693,12 +693,12 @@ int dma_async_device_register(struct dma_device *device)
 		!device->device_prep_dma_interrupt);
 	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
 		!device->device_prep_dma_sg);
-	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
-		!device->device_prep_slave_sg);
 	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
 		!device->device_prep_dma_cyclic);
 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 		!device->device_control);
+	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
+		!device->device_prep_interleaved_dma);
 
 	BUG_ON(!device->device_alloc_chan_resources);
 	BUG_ON(!device->device_free_chan_resources);
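On the provider side, the new BUG_ON pairs the DMA_INTERLEAVE capability
with the callback. A hypothetical DMAC driver would wire both up before
registering; 'foo_dmac' and its functions are invented for illustration.

#include <linux/dmaengine.h>

struct foo_dmac {
	struct dma_device ddev;
	/* ... hardware state ... */
};

static struct dma_async_tx_descriptor *
foo_prep_interleaved(struct dma_chan *chan,
		     struct dma_interleaved_template *xt, unsigned long flags)
{
	/* walk xt->numf frames of xt->frame_size chunks and program
	 * the hardware accordingly; stubbed out here */
	return NULL;
}

static int foo_register(struct foo_dmac *fd)
{
	dma_cap_set(DMA_INTERLEAVE, fd->ddev.cap_mask);
	fd->ddev.device_prep_interleaved_dma = foo_prep_interleaved;

	return dma_async_device_register(&fd->ddev);
}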
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index a865b3a354cd..5532bb8b500c 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -71,10 +71,10 @@ enum dma_transaction_type {
 	DMA_ASYNC_TX,
 	DMA_SLAVE,
 	DMA_CYCLIC,
-};
-
+	DMA_INTERLEAVE,
 /* last transaction type for creation of the capabilities mask */
-#define DMA_TX_TYPE_END (DMA_CYCLIC + 1)
+	DMA_TX_TYPE_END,
+};
 
 /**
  * enum dma_transfer_direction - dma transfer mode and direction indicator
@@ -91,6 +91,74 @@ enum dma_transfer_direction {
 };
 
 /**
+ * Interleaved Transfer Request
+ * ----------------------------
+ * A chunk is a collection of contiguous bytes to be transferred.
+ * The gap (in bytes) between two chunks is called the inter-chunk-gap (ICG).
+ * ICGs may or may not change between chunks.
+ * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
+ *  that when repeated an integral number of times, specifies the transfer.
+ * A transfer template is the specification of a Frame, the number of times
+ *  it is to be repeated and other per-transfer attributes.
+ *
+ * Practically, a client driver would have ready a template for each
+ *  type of transfer it is going to need during its lifetime and
+ *  set only 'src_start' and 'dst_start' before submitting the requests.
+ *
+ *
+ *  |      Frame-1        |       Frame-2       | ~ |     Frame-'numf'    |
+ *  |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
+ *
+ *  ==  Chunk size
+ *  ... ICG
+ */
+
+/**
+ * struct data_chunk - Element of scatter-gather list that makes a frame.
+ * @size: Number of bytes to read from source.
+ *	  size_dst := fn(op, size_src), so doesn't mean much for destination.
+ * @icg: Number of bytes to jump after the last src/dst address of this
+ *	 chunk and before the first src/dst address of the next chunk.
+ *	 Ignored for dst (assumed 0), if dst_inc is true and dst_sgl is false.
+ *	 Ignored for src (assumed 0), if src_inc is true and src_sgl is false.
+ */
+struct data_chunk {
+	size_t size;
+	size_t icg;
+};
+
+/**
+ * struct dma_interleaved_template - Template to convey DMAC the transfer pattern
+ *	 and attributes.
+ * @src_start: Bus address of source for the first chunk.
+ * @dst_start: Bus address of destination for the first chunk.
+ * @dir: Specifies the type of Source and Destination.
+ * @src_inc: If the source address increments after reading from it.
+ * @dst_inc: If the destination address increments after writing to it.
+ * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
+ *	     Otherwise, source is read contiguously (icg ignored).
+ *	     Ignored if src_inc is false.
+ * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
+ *	     Otherwise, destination is filled contiguously (icg ignored).
+ *	     Ignored if dst_inc is false.
+ * @numf: Number of frames in this template.
+ * @frame_size: Number of chunks in a frame, i.e., size of sgl[].
+ * @sgl: Array of {chunk,icg} pairs that make up a frame.
+ */
+struct dma_interleaved_template {
+	dma_addr_t src_start;
+	dma_addr_t dst_start;
+	enum dma_transfer_direction dir;
+	bool src_inc;
+	bool dst_inc;
+	bool src_sgl;
+	bool dst_sgl;
+	size_t numf;
+	size_t frame_size;
+	struct data_chunk sgl[0];
+};
+
+/**
  * enum dma_ctrl_flags - DMA flags to augment operation preparation,
  *	control completion, and communicate status.
  * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
@@ -445,6 +513,7 @@ struct dma_tx_state {
  * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
  *	The function takes a buffer of size buf_len. The callback function will
  *	be called after period_len bytes have been transferred.
+ * @device_prep_interleaved_dma: Transfer expression in a generic way.
  * @device_control: manipulate all pending operations on a channel, returns
  *	zero or error code
  * @device_tx_status: poll for transaction completion, the optional
@@ -509,6 +578,9 @@ struct dma_device {
 	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
 		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 		size_t period_len, enum dma_transfer_direction direction);
+	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
+		struct dma_chan *chan, struct dma_interleaved_template *xt,
+		unsigned long flags);
 	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		unsigned long arg);
 
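Finally, the arithmetic implied by the struct: each frame moves the sum of
the chunk sizes, while a side on which the icg applies strides by size plus
icg. A small sanity-check sketch (helper names are invented, assuming only
the header as patched above) makes that concrete.

#include <linux/dmaengine.h>

/* Bytes of payload a single frame transfers. */
static size_t xt_frame_bytes(const struct dma_interleaved_template *xt)
{
	size_t bytes = 0;
	size_t i;

	for (i = 0; i < xt->frame_size; i++)
		bytes += xt->sgl[i].size;

	return bytes;
}

/* Address-space advance per frame on a side where the icg applies
 * (src_inc && src_sgl, or dst_inc && dst_sgl).
 */
static size_t xt_frame_stride(const struct dma_interleaved_template *xt)
{
	size_t stride = 0;
	size_t i;

	for (i = 0; i < xt->frame_size; i++)
		stride += xt->sgl[i].size + xt->sgl[i].icg;

	return stride;
}

For the de-interleave template sketched earlier, xt_frame_bytes() is 4 and
xt_frame_stride() is 8 per frame, giving numf * 4 bytes moved from a source
span of numf * 8 bytes.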