diff options
author | Matt Porter <mporter@ti.com> | 2013-01-10 13:41:04 -0500 |
---|---|---|
committer | Vinod Koul <vinod.koul@intel.com> | 2013-01-20 07:04:29 -0500 |
commit | 661f7cb55c61fa7491e0caf21e55f59e5bc49abe (patch) | |
tree | 285511be3d5676f321392e9fe8683564d6ab646a /drivers/dma | |
parent | 3a95b9fbba893ebfa9b83de105707539e0228e0c (diff) |
dma: edma: fix slave config dependency on direction
The edma_slave_config() implementation depends on the
direction field such that it will not properly configure
a slave channel when called without direction set.
This fixes the implementation so that the slave config
is copied as-is and prep_slave_sg() performs the
direction-dependent handling. spi-omap2-mcspi and
omap_hsmmc both expose this bug as they configure the
slave channel config from a common path with an unconfigured
direction field.
Signed-off-by: Matt Porter <mporter@ti.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/edma.c | 55 |
1 file changed, 27 insertions, 28 deletions
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 232b4583ae93..82c8672f26e8 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -69,9 +69,7 @@ struct edma_chan { | |||
69 | int ch_num; | 69 | int ch_num; |
70 | bool alloced; | 70 | bool alloced; |
71 | int slot[EDMA_MAX_SLOTS]; | 71 | int slot[EDMA_MAX_SLOTS]; |
72 | dma_addr_t addr; | 72 | struct dma_slave_config cfg; |
73 | int addr_width; | ||
74 | int maxburst; | ||
75 | }; | 73 | }; |
76 | 74 | ||
77 | struct edma_cc { | 75 | struct edma_cc { |
@@ -178,29 +176,14 @@ static int edma_terminate_all(struct edma_chan *echan) | |||
178 | return 0; | 176 | return 0; |
179 | } | 177 | } |
180 | 178 | ||
181 | |||
182 | static int edma_slave_config(struct edma_chan *echan, | 179 | static int edma_slave_config(struct edma_chan *echan, |
183 | struct dma_slave_config *config) | 180 | struct dma_slave_config *cfg) |
184 | { | 181 | { |
185 | if ((config->src_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES) || | 182 | if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || |
186 | (config->dst_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)) | 183 | cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) |
187 | return -EINVAL; | 184 | return -EINVAL; |
188 | 185 | ||
189 | if (config->direction == DMA_MEM_TO_DEV) { | 186 | memcpy(&echan->cfg, cfg, sizeof(echan->cfg)); |
190 | if (config->dst_addr) | ||
191 | echan->addr = config->dst_addr; | ||
192 | if (config->dst_addr_width) | ||
193 | echan->addr_width = config->dst_addr_width; | ||
194 | if (config->dst_maxburst) | ||
195 | echan->maxburst = config->dst_maxburst; | ||
196 | } else if (config->direction == DMA_DEV_TO_MEM) { | ||
197 | if (config->src_addr) | ||
198 | echan->addr = config->src_addr; | ||
199 | if (config->src_addr_width) | ||
200 | echan->addr_width = config->src_addr_width; | ||
201 | if (config->src_maxburst) | ||
202 | echan->maxburst = config->src_maxburst; | ||
203 | } | ||
204 | 187 | ||
205 | return 0; | 188 | return 0; |
206 | } | 189 | } |
@@ -235,6 +218,9 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
235 | struct edma_chan *echan = to_edma_chan(chan); | 218 | struct edma_chan *echan = to_edma_chan(chan); |
236 | struct device *dev = chan->device->dev; | 219 | struct device *dev = chan->device->dev; |
237 | struct edma_desc *edesc; | 220 | struct edma_desc *edesc; |
221 | dma_addr_t dev_addr; | ||
222 | enum dma_slave_buswidth dev_width; | ||
223 | u32 burst; | ||
238 | struct scatterlist *sg; | 224 | struct scatterlist *sg; |
239 | int i; | 225 | int i; |
240 | int acnt, bcnt, ccnt, src, dst, cidx; | 226 | int acnt, bcnt, ccnt, src, dst, cidx; |
@@ -243,7 +229,20 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
243 | if (unlikely(!echan || !sgl || !sg_len)) | 229 | if (unlikely(!echan || !sgl || !sg_len)) |
244 | return NULL; | 230 | return NULL; |
245 | 231 | ||
246 | if (echan->addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { | 232 | if (direction == DMA_DEV_TO_MEM) { |
233 | dev_addr = echan->cfg.src_addr; | ||
234 | dev_width = echan->cfg.src_addr_width; | ||
235 | burst = echan->cfg.src_maxburst; | ||
236 | } else if (direction == DMA_MEM_TO_DEV) { | ||
237 | dev_addr = echan->cfg.dst_addr; | ||
238 | dev_width = echan->cfg.dst_addr_width; | ||
239 | burst = echan->cfg.dst_maxburst; | ||
240 | } else { | ||
241 | dev_err(dev, "%s: bad direction?\n", __func__); | ||
242 | return NULL; | ||
243 | } | ||
244 | |||
245 | if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { | ||
247 | dev_err(dev, "Undefined slave buswidth\n"); | 246 | dev_err(dev, "Undefined slave buswidth\n"); |
248 | return NULL; | 247 | return NULL; |
249 | } | 248 | } |
@@ -275,14 +274,14 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
275 | } | 274 | } |
276 | } | 275 | } |
277 | 276 | ||
278 | acnt = echan->addr_width; | 277 | acnt = dev_width; |
279 | 278 | ||
280 | /* | 279 | /* |
281 | * If the maxburst is equal to the fifo width, use | 280 | * If the maxburst is equal to the fifo width, use |
282 | * A-synced transfers. This allows for large contiguous | 281 | * A-synced transfers. This allows for large contiguous |
283 | * buffer transfers using only one PaRAM set. | 282 | * buffer transfers using only one PaRAM set. |
284 | */ | 283 | */ |
285 | if (echan->maxburst == 1) { | 284 | if (burst == 1) { |
286 | edesc->absync = false; | 285 | edesc->absync = false; |
287 | ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1); | 286 | ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1); |
288 | bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1); | 287 | bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1); |
@@ -302,7 +301,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
302 | */ | 301 | */ |
303 | } else { | 302 | } else { |
304 | edesc->absync = true; | 303 | edesc->absync = true; |
305 | bcnt = echan->maxburst; | 304 | bcnt = burst; |
306 | ccnt = sg_dma_len(sg) / (acnt * bcnt); | 305 | ccnt = sg_dma_len(sg) / (acnt * bcnt); |
307 | if (ccnt > (SZ_64K - 1)) { | 306 | if (ccnt > (SZ_64K - 1)) { |
308 | dev_err(dev, "Exceeded max SG segment size\n"); | 307 | dev_err(dev, "Exceeded max SG segment size\n"); |
@@ -313,13 +312,13 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
313 | 312 | ||
314 | if (direction == DMA_MEM_TO_DEV) { | 313 | if (direction == DMA_MEM_TO_DEV) { |
315 | src = sg_dma_address(sg); | 314 | src = sg_dma_address(sg); |
316 | dst = echan->addr; | 315 | dst = dev_addr; |
317 | src_bidx = acnt; | 316 | src_bidx = acnt; |
318 | src_cidx = cidx; | 317 | src_cidx = cidx; |
319 | dst_bidx = 0; | 318 | dst_bidx = 0; |
320 | dst_cidx = 0; | 319 | dst_cidx = 0; |
321 | } else { | 320 | } else { |
322 | src = echan->addr; | 321 | src = dev_addr; |
323 | dst = sg_dma_address(sg); | 322 | dst = sg_dma_address(sg); |
324 | src_bidx = 0; | 323 | src_bidx = 0; |
325 | src_cidx = 0; | 324 | src_cidx = 0; |