author     Joel Fernandes <joelf@ti.com>        2013-09-23 19:05:13 -0400
committer  Vinod Koul <vinod.koul@intel.com>    2013-10-21 03:25:29 -0400
commit     fd009035047941fe21622b09665423f1043f0507 (patch)
tree       0178cfc3b7bc928a4d20adce25702591d24410e3
parent     13098cf05a430464f50ffac73cfa2c467768410d (diff)
dma: edma: Split out PaRAM set calculations into its own function
PaRAM set calculation is abstracted into its own function to enable better reuse for other DMA cases such as cyclic. We adapt the Slave SG case to use the new function.

This provides a much cleaner abstraction of the internals of the PaRAM set. However, any PaRAM attributes that are not common to all DMA types, such as interrupt enabling, must be set separately; the new function takes care of the most common attributes.

Also added comments clarifying the A-sync case calculations.

Signed-off-by: Joel Fernandes <joelf@ti.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--   drivers/dma/edma.c   198
1 file changed, 126 insertions, 72 deletions
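Before the patch itself, a quick way to see what the new helper computes: below is a minimal, standalone userspace sketch (not driver code) of the acnt/bcnt/ccnt selection that edma_config_pset() performs for the A-sync (maxburst == 1) and AB-sync cases. calc_counts() and the demo values in main() are illustrative assumptions; only the formulas mirror the patch.

/*
 * Standalone illustration (NOT driver code) of the acnt/bcnt/ccnt selection
 * performed by the new edma_config_pset() helper.  calc_counts() and the
 * demo values in main() are hypothetical; only the formulas mirror the patch.
 */
#include <stdio.h>

#define SZ_64K 0x10000

/* Returns 1 for AB-sync, 0 for A-sync, -1 if the SG segment is too long. */
static int calc_counts(unsigned int dma_length, unsigned int dev_width,
		       unsigned int burst, int *acnt, int *bcnt, int *ccnt)
{
	*acnt = dev_width;

	if (burst == 1) {
		/* A-sync: bcnt/ccnt are the remainder/quotient of
		 * (dma_length / acnt) divided by (SZ_64K - 1). */
		*ccnt = dma_length / *acnt / (SZ_64K - 1);
		*bcnt = dma_length / *acnt - *ccnt * (SZ_64K - 1);
		if (*bcnt)
			(*ccnt)++;		/* extra frame for the remainder */
		else
			*bcnt = SZ_64K - 1;	/* length was an exact multiple */
		return 0;
	}

	/* AB-sync: A count = FIFO width, B count = maxburst,
	 * C count (number of frames) limited to SZ_64K - 1. */
	*bcnt = burst;
	*ccnt = dma_length / (*acnt * *bcnt);
	if (*ccnt > SZ_64K - 1)
		return -1;			/* SG segment too long */
	return 1;
}

int main(void)
{
	int acnt, bcnt, ccnt;

	/* 1 MiB buffer, 4-byte FIFO width, maxburst 1 -> A-sync:
	 * acnt=4, bcnt=4, ccnt=5 (4 + 4*65535 elements = 262144). */
	calc_counts(1 << 20, 4, 1, &acnt, &bcnt, &ccnt);
	printf("A-sync:  acnt=%d bcnt=%d ccnt=%d\n", acnt, bcnt, ccnt);

	/* 64 KiB buffer, 4-byte FIFO width, maxburst 16 -> AB-sync:
	 * acnt=4, bcnt=16, ccnt=1024. */
	calc_counts(1 << 16, 4, 16, &acnt, &bcnt, &ccnt);
	printf("AB-sync: acnt=%d bcnt=%d ccnt=%d\n", acnt, bcnt, ccnt);
	return 0;
}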
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 098a8da450f0..e47e3c9e37b2 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -250,6 +250,117 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	return ret;
 }
 
+/*
+ * A PaRAM set configuration abstraction used by other modes
+ * @chan: Channel whose PaRAM set we're configuring
+ * @pset: PaRAM set to initialize and set up
+ * @src_addr: Source address of the DMA
+ * @dst_addr: Destination address of the DMA
+ * @burst: Number of dev_width-sized elements per burst (maxburst)
+ * @dev_width: Width (in bytes) of each element transferred to/from the device
+ * @dma_length: Total length of the DMA transfer
+ * @direction: Direction of the transfer
+ */
+static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
+			dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
+			enum dma_slave_buswidth dev_width, unsigned int dma_length,
+			enum dma_transfer_direction direction)
+{
+	struct edma_chan *echan = to_edma_chan(chan);
+	struct device *dev = chan->device->dev;
+	int acnt, bcnt, ccnt, cidx;
+	int src_bidx, dst_bidx, src_cidx, dst_cidx;
+	int absync;
+
+	acnt = dev_width;
+	/*
+	 * If the maxburst is equal to the fifo width, use
+	 * A-synced transfers. This allows for large contiguous
+	 * buffer transfers using only one PaRAM set.
+	 */
+	if (burst == 1) {
+		/*
+		 * For the A-sync case, bcnt and ccnt are the remainder
+		 * and quotient respectively of the division of
+		 * (dma_length / acnt) by (SZ_64K - 1). This is so
+		 * that in case bcnt overflows, we have ccnt to use.
+		 * Note: In A-sync transfers only, bcntrld is used, but it
+		 * only applies for sg_dma_len(sg) >= SZ_64K.
+		 * In this case, the approach adopted is: bcnt for the
+		 * first frame will be the remainder below. Then for
+		 * every successive frame, bcnt will be SZ_64K-1. This
+		 * is assured since bcntrld = 0xffff at the end of this function.
+		 */
+		absync = false;
+		ccnt = dma_length / acnt / (SZ_64K - 1);
+		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
+		/*
+		 * If bcnt is non-zero, we have a remainder and hence an
+		 * extra frame to transfer, so increment ccnt.
+		 */
+		if (bcnt)
+			ccnt++;
+		else
+			bcnt = SZ_64K - 1;
+		cidx = acnt;
+	} else {
+		/*
+		 * If maxburst is greater than the fifo address_width,
+		 * use AB-synced transfers where A count is the fifo
+		 * address_width and B count is the maxburst. In this
+		 * case, we are limited to transfers of C count frames
+		 * of (address_width * maxburst) where C count is limited
+		 * to SZ_64K-1. This places an upper bound on the length
+		 * of an SG segment that can be handled.
+		 */
+		absync = true;
+		bcnt = burst;
+		ccnt = dma_length / (acnt * bcnt);
+		if (ccnt > (SZ_64K - 1)) {
+			dev_err(dev, "Exceeded max SG segment size\n");
+			return -EINVAL;
+		}
+		cidx = acnt * bcnt;
+	}
+
+	if (direction == DMA_MEM_TO_DEV) {
+		src_bidx = acnt;
+		src_cidx = cidx;
+		dst_bidx = 0;
+		dst_cidx = 0;
+	} else if (direction == DMA_DEV_TO_MEM) {
+		src_bidx = 0;
+		src_cidx = 0;
+		dst_bidx = acnt;
+		dst_cidx = cidx;
+	} else {
+		dev_err(dev, "%s: direction not implemented yet\n", __func__);
+		return -EINVAL;
+	}
+
+	pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+	/* Configure A or AB synchronized transfers */
+	if (absync)
+		pset->opt |= SYNCDIM;
+
+	pset->src = src_addr;
+	pset->dst = dst_addr;
+
+	pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+	pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
+
+	pset->a_b_cnt = bcnt << 16 | acnt;
+	pset->ccnt = ccnt;
+	/*
+	 * The only time (bcntrld) auto reload is required is for the
+	 * A-sync case, and in this case, a reload value
+	 * of SZ_64K-1 only is assured. 'link' is initially set to NULL
+	 * and will later be populated by edma_execute.
+	 */
+	pset->link_bcntrld = 0xffffffff;
+	return absync;
+}
+
 static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 	struct dma_chan *chan, struct scatterlist *sgl,
 	unsigned int sg_len, enum dma_transfer_direction direction,
@@ -258,23 +369,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 	struct edma_chan *echan = to_edma_chan(chan);
 	struct device *dev = chan->device->dev;
 	struct edma_desc *edesc;
-	dma_addr_t dev_addr;
+	dma_addr_t src_addr = 0, dst_addr = 0;
 	enum dma_slave_buswidth dev_width;
 	u32 burst;
 	struct scatterlist *sg;
-	int acnt, bcnt, ccnt, src, dst, cidx;
-	int src_bidx, dst_bidx, src_cidx, dst_cidx;
-	int i, nslots;
+	int i, nslots, ret;
 
 	if (unlikely(!echan || !sgl || !sg_len))
 		return NULL;
 
 	if (direction == DMA_DEV_TO_MEM) {
-		dev_addr = echan->cfg.src_addr;
+		src_addr = echan->cfg.src_addr;
 		dev_width = echan->cfg.src_addr_width;
 		burst = echan->cfg.src_maxburst;
 	} else if (direction == DMA_MEM_TO_DEV) {
-		dev_addr = echan->cfg.dst_addr;
+		dst_addr = echan->cfg.dst_addr;
 		dev_width = echan->cfg.dst_addr_width;
 		burst = echan->cfg.dst_maxburst;
 	} else {
@@ -313,63 +422,19 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 
 	/* Configure PaRAM sets for each SG */
 	for_each_sg(sgl, sg, sg_len, i) {
+		/* Get address for each SG */
+		if (direction == DMA_DEV_TO_MEM)
+			dst_addr = sg_dma_address(sg);
+		else
+			src_addr = sg_dma_address(sg);
 
-		acnt = dev_width;
-
-		/*
-		 * If the maxburst is equal to the fifo width, use
-		 * A-synced transfers. This allows for large contiguous
-		 * buffer transfers using only one PaRAM set.
-		 */
-		if (burst == 1) {
-			edesc->absync = false;
-			ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
-			bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
-			if (bcnt)
-				ccnt++;
-			else
-				bcnt = SZ_64K - 1;
-			cidx = acnt;
-		/*
-		 * If maxburst is greater than the fifo address_width,
-		 * use AB-synced transfers where A count is the fifo
-		 * address_width and B count is the maxburst. In this
-		 * case, we are limited to transfers of C count frames
-		 * of (address_width * maxburst) where C count is limited
-		 * to SZ_64K-1. This places an upper bound on the length
-		 * of an SG segment that can be handled.
-		 */
-		} else {
-			edesc->absync = true;
-			bcnt = burst;
-			ccnt = sg_dma_len(sg) / (acnt * bcnt);
-			if (ccnt > (SZ_64K - 1)) {
-				dev_err(dev, "Exceeded max SG segment size\n");
-				return NULL;
-			}
-			cidx = acnt * bcnt;
-		}
-
-		if (direction == DMA_MEM_TO_DEV) {
-			src = sg_dma_address(sg);
-			dst = dev_addr;
-			src_bidx = acnt;
-			src_cidx = cidx;
-			dst_bidx = 0;
-			dst_cidx = 0;
-		} else {
-			src = dev_addr;
-			dst = sg_dma_address(sg);
-			src_bidx = 0;
-			src_cidx = 0;
-			dst_bidx = acnt;
-			dst_cidx = cidx;
-		}
+		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
+				       dst_addr, burst, dev_width,
+				       sg_dma_len(sg), direction);
+		if (ret < 0)
+			return NULL;
 
-		edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
-		/* Configure A or AB synchronized transfers */
-		if (edesc->absync)
-			edesc->pset[i].opt |= SYNCDIM;
+		edesc->absync = ret;
 
 		/* If this is the last in a current SG set of transactions,
 		   enable interrupts so that next set is processed */
@@ -379,17 +444,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 		/* If this is the last set, enable completion interrupt flag */
 		if (i == sg_len - 1)
 			edesc->pset[i].opt |= TCINTEN;
-
-		edesc->pset[i].src = src;
-		edesc->pset[i].dst = dst;
-
-		edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
-		edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;
-
-		edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
-		edesc->pset[i].ccnt = ccnt;
-		edesc->pset[i].link_bcntrld = 0xffffffff;
-
 	}
 
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
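For context, this is roughly how a hypothetical dmaengine client would exercise the edma_prep_slave_sg() path shown above. The example_issue_tx() helper, its parameters and the burst/width values are assumptions for illustration; only the dmaengine calls themselves (dmaengine_slave_config(), dmaengine_prep_slave_sg(), dmaengine_submit(), dma_async_issue_pending()) are the standard slave-DMA API.

/* Hypothetical client sketch; error handling and channel setup omitted. */
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int example_issue_tx(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int nents, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,	/* > 1, so the AB-sync path is taken */
	};
	struct dma_async_tx_descriptor *desc;

	dmaengine_slave_config(chan, &cfg);

	/* Lands in edma_prep_slave_sg(), which now calls edma_config_pset()
	 * once per SG entry to fill in the PaRAM set. */
	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}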