about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  drivers/dma/sun6i-dma.c  254
1 file changed, 188 insertions(+), 66 deletions(-)
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 2db12e493c53..5065ca43face 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -146,6 +146,8 @@ struct sun6i_vchan {
146 struct dma_slave_config cfg; 146 struct dma_slave_config cfg;
147 struct sun6i_pchan *phy; 147 struct sun6i_pchan *phy;
148 u8 port; 148 u8 port;
149 u8 irq_type;
150 bool cyclic;
149}; 151};
150 152
151struct sun6i_dma_dev { 153struct sun6i_dma_dev {
@@ -254,6 +256,30 @@ static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width)
254 return addr_width >> 1; 256 return addr_width >> 1;
255} 257}
256 258
259static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan)
260{
261 struct sun6i_desc *txd = pchan->desc;
262 struct sun6i_dma_lli *lli;
263 size_t bytes;
264 dma_addr_t pos;
265
266 pos = readl(pchan->base + DMA_CHAN_LLI_ADDR);
267 bytes = readl(pchan->base + DMA_CHAN_CUR_CNT);
268
269 if (pos == LLI_LAST_ITEM)
270 return bytes;
271
272 for (lli = txd->v_lli; lli; lli = lli->v_lli_next) {
273 if (lli->p_lli_next == pos) {
274 for (lli = lli->v_lli_next; lli; lli = lli->v_lli_next)
275 bytes += lli->len;
276 break;
277 }
278 }
279
280 return bytes;
281}
282
257static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev, 283static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
258 struct sun6i_dma_lli *next, 284 struct sun6i_dma_lli *next,
259 dma_addr_t next_phy, 285 dma_addr_t next_phy,
@@ -276,45 +302,6 @@ static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
276 return next; 302 return next;
277} 303}
278 304
279static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli,
280 dma_addr_t src,
281 dma_addr_t dst, u32 len,
282 struct dma_slave_config *config)
283{
284 u8 src_width, dst_width, src_burst, dst_burst;
285
286 if (!config)
287 return -EINVAL;
288
289 src_burst = convert_burst(config->src_maxburst);
290 if (src_burst)
291 return src_burst;
292
293 dst_burst = convert_burst(config->dst_maxburst);
294 if (dst_burst)
295 return dst_burst;
296
297 src_width = convert_buswidth(config->src_addr_width);
298 if (src_width)
299 return src_width;
300
301 dst_width = convert_buswidth(config->dst_addr_width);
302 if (dst_width)
303 return dst_width;
304
305 lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) |
306 DMA_CHAN_CFG_SRC_WIDTH(src_width) |
307 DMA_CHAN_CFG_DST_BURST(dst_burst) |
308 DMA_CHAN_CFG_DST_WIDTH(dst_width);
309
310 lli->src = src;
311 lli->dst = dst;
312 lli->len = len;
313 lli->para = NORMAL_WAIT;
314
315 return 0;
316}
317
318static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan, 305static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan,
319 struct sun6i_dma_lli *lli) 306 struct sun6i_dma_lli *lli)
320{ 307{
@@ -381,9 +368,13 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
381 irq_reg = pchan->idx / DMA_IRQ_CHAN_NR; 368 irq_reg = pchan->idx / DMA_IRQ_CHAN_NR;
382 irq_offset = pchan->idx % DMA_IRQ_CHAN_NR; 369 irq_offset = pchan->idx % DMA_IRQ_CHAN_NR;
383 370
384 irq_val = readl(sdev->base + DMA_IRQ_EN(irq_offset)); 371 vchan->irq_type = vchan->cyclic ? DMA_IRQ_PKG : DMA_IRQ_QUEUE;
385 irq_val |= DMA_IRQ_QUEUE << (irq_offset * DMA_IRQ_CHAN_WIDTH); 372
386 writel(irq_val, sdev->base + DMA_IRQ_EN(irq_offset)); 373 irq_val = readl(sdev->base + DMA_IRQ_EN(irq_reg));
374 irq_val &= ~((DMA_IRQ_HALF | DMA_IRQ_PKG | DMA_IRQ_QUEUE) <<
375 (irq_offset * DMA_IRQ_CHAN_WIDTH));
376 irq_val |= vchan->irq_type << (irq_offset * DMA_IRQ_CHAN_WIDTH);
377 writel(irq_val, sdev->base + DMA_IRQ_EN(irq_reg));
387 378
388 writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR); 379 writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR);
389 writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE); 380 writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE);
@@ -479,11 +470,12 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
479 writel(status, sdev->base + DMA_IRQ_STAT(i)); 470 writel(status, sdev->base + DMA_IRQ_STAT(i));
480 471
481 for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) { 472 for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) {
482 if (status & DMA_IRQ_QUEUE) { 473 pchan = sdev->pchans + j;
483 pchan = sdev->pchans + j; 474 vchan = pchan->vchan;
484 vchan = pchan->vchan; 475 if (vchan && (status & vchan->irq_type)) {
485 476 if (vchan->cyclic) {
486 if (vchan) { 477 vchan_cyclic_callback(&pchan->desc->vd);
478 } else {
487 spin_lock(&vchan->vc.lock); 479 spin_lock(&vchan->vc.lock);
488 vchan_cookie_complete(&pchan->desc->vd); 480 vchan_cookie_complete(&pchan->desc->vd);
489 pchan->done = pchan->desc; 481 pchan->done = pchan->desc;
@@ -502,6 +494,55 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
502 return ret; 494 return ret;
503} 495}
504 496
497static int set_config(struct sun6i_dma_dev *sdev,
498 struct dma_slave_config *sconfig,
499 enum dma_transfer_direction direction,
500 u32 *p_cfg)
501{
502 s8 src_width, dst_width, src_burst, dst_burst;
503
504 switch (direction) {
505 case DMA_MEM_TO_DEV:
506 src_burst = convert_burst(sconfig->src_maxburst ?
507 sconfig->src_maxburst : 8);
508 src_width = convert_buswidth(sconfig->src_addr_width !=
509 DMA_SLAVE_BUSWIDTH_UNDEFINED ?
510 sconfig->src_addr_width :
511 DMA_SLAVE_BUSWIDTH_4_BYTES);
512 dst_burst = convert_burst(sconfig->dst_maxburst);
513 dst_width = convert_buswidth(sconfig->dst_addr_width);
514 break;
515 case DMA_DEV_TO_MEM:
516 src_burst = convert_burst(sconfig->src_maxburst);
517 src_width = convert_buswidth(sconfig->src_addr_width);
518 dst_burst = convert_burst(sconfig->dst_maxburst ?
519 sconfig->dst_maxburst : 8);
520 dst_width = convert_buswidth(sconfig->dst_addr_width !=
521 DMA_SLAVE_BUSWIDTH_UNDEFINED ?
522 sconfig->dst_addr_width :
523 DMA_SLAVE_BUSWIDTH_4_BYTES);
524 break;
525 default:
526 return -EINVAL;
527 }
528
529 if (src_burst < 0)
530 return src_burst;
531 if (src_width < 0)
532 return src_width;
533 if (dst_burst < 0)
534 return dst_burst;
535 if (dst_width < 0)
536 return dst_width;
537
538 *p_cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) |
539 DMA_CHAN_CFG_SRC_WIDTH(src_width) |
540 DMA_CHAN_CFG_DST_BURST(dst_burst) |
541 DMA_CHAN_CFG_DST_WIDTH(dst_width);
542
543 return 0;
544}
545
505static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( 546static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
506 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 547 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
507 size_t len, unsigned long flags) 548 size_t len, unsigned long flags)
@@ -569,13 +610,15 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
569 struct sun6i_desc *txd; 610 struct sun6i_desc *txd;
570 struct scatterlist *sg; 611 struct scatterlist *sg;
571 dma_addr_t p_lli; 612 dma_addr_t p_lli;
613 u32 lli_cfg;
572 int i, ret; 614 int i, ret;
573 615
574 if (!sgl) 616 if (!sgl)
575 return NULL; 617 return NULL;
576 618
577 if (!is_slave_direction(dir)) { 619 ret = set_config(sdev, sconfig, dir, &lli_cfg);
578 dev_err(chan2dev(chan), "Invalid DMA direction\n"); 620 if (ret) {
621 dev_err(chan2dev(chan), "Invalid DMA configuration\n");
579 return NULL; 622 return NULL;
580 } 623 }
581 624
@@ -588,14 +631,14 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
588 if (!v_lli) 631 if (!v_lli)
589 goto err_lli_free; 632 goto err_lli_free;
590 633
591 if (dir == DMA_MEM_TO_DEV) { 634 v_lli->len = sg_dma_len(sg);
592 ret = sun6i_dma_cfg_lli(v_lli, sg_dma_address(sg), 635 v_lli->para = NORMAL_WAIT;
593 sconfig->dst_addr, sg_dma_len(sg),
594 sconfig);
595 if (ret)
596 goto err_cur_lli_free;
597 636
598 v_lli->cfg |= DMA_CHAN_CFG_DST_IO_MODE | 637 if (dir == DMA_MEM_TO_DEV) {
638 v_lli->src = sg_dma_address(sg);
639 v_lli->dst = sconfig->dst_addr;
640 v_lli->cfg = lli_cfg |
641 DMA_CHAN_CFG_DST_IO_MODE |
599 DMA_CHAN_CFG_SRC_LINEAR_MODE | 642 DMA_CHAN_CFG_SRC_LINEAR_MODE |
600 DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | 643 DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
601 DMA_CHAN_CFG_DST_DRQ(vchan->port); 644 DMA_CHAN_CFG_DST_DRQ(vchan->port);
@@ -607,13 +650,10 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
607 sg_dma_len(sg), flags); 650 sg_dma_len(sg), flags);
608 651
609 } else { 652 } else {
610 ret = sun6i_dma_cfg_lli(v_lli, sconfig->src_addr, 653 v_lli->src = sconfig->src_addr;
611 sg_dma_address(sg), sg_dma_len(sg), 654 v_lli->dst = sg_dma_address(sg);
612 sconfig); 655 v_lli->cfg = lli_cfg |
613 if (ret) 656 DMA_CHAN_CFG_DST_LINEAR_MODE |
614 goto err_cur_lli_free;
615
616 v_lli->cfg |= DMA_CHAN_CFG_DST_LINEAR_MODE |
617 DMA_CHAN_CFG_SRC_IO_MODE | 657 DMA_CHAN_CFG_SRC_IO_MODE |
618 DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | 658 DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
619 DMA_CHAN_CFG_SRC_DRQ(vchan->port); 659 DMA_CHAN_CFG_SRC_DRQ(vchan->port);
@@ -634,8 +674,78 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
634 674
635 return vchan_tx_prep(&vchan->vc, &txd->vd, flags); 675 return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
636 676
637err_cur_lli_free: 677err_lli_free:
638 dma_pool_free(sdev->pool, v_lli, p_lli); 678 for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
679 dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
680 kfree(txd);
681 return NULL;
682}
683
684static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
685 struct dma_chan *chan,
686 dma_addr_t buf_addr,
687 size_t buf_len,
688 size_t period_len,
689 enum dma_transfer_direction dir,
690 unsigned long flags)
691{
692 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
693 struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
694 struct dma_slave_config *sconfig = &vchan->cfg;
695 struct sun6i_dma_lli *v_lli, *prev = NULL;
696 struct sun6i_desc *txd;
697 dma_addr_t p_lli;
698 u32 lli_cfg;
699 unsigned int i, periods = buf_len / period_len;
700 int ret;
701
702 ret = set_config(sdev, sconfig, dir, &lli_cfg);
703 if (ret) {
704 dev_err(chan2dev(chan), "Invalid DMA configuration\n");
705 return NULL;
706 }
707
708 txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
709 if (!txd)
710 return NULL;
711
712 for (i = 0; i < periods; i++) {
713 v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
714 if (!v_lli) {
715 dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
716 goto err_lli_free;
717 }
718
719 v_lli->len = period_len;
720 v_lli->para = NORMAL_WAIT;
721
722 if (dir == DMA_MEM_TO_DEV) {
723 v_lli->src = buf_addr + period_len * i;
724 v_lli->dst = sconfig->dst_addr;
725 v_lli->cfg = lli_cfg |
726 DMA_CHAN_CFG_DST_IO_MODE |
727 DMA_CHAN_CFG_SRC_LINEAR_MODE |
728 DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
729 DMA_CHAN_CFG_DST_DRQ(vchan->port);
730 } else {
731 v_lli->src = sconfig->src_addr;
732 v_lli->dst = buf_addr + period_len * i;
733 v_lli->cfg = lli_cfg |
734 DMA_CHAN_CFG_DST_LINEAR_MODE |
735 DMA_CHAN_CFG_SRC_IO_MODE |
736 DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
737 DMA_CHAN_CFG_SRC_DRQ(vchan->port);
738 }
739
740 prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd);
741 }
742
743 prev->p_lli_next = txd->p_lli; /* cyclic list */
744
745 vchan->cyclic = true;
746
747 return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
748
639err_lli_free: 749err_lli_free:
640 for (prev = txd->v_lli; prev; prev = prev->v_lli_next) 750 for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
641 dma_pool_free(sdev->pool, prev, virt_to_phys(prev)); 751 dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
@@ -712,6 +822,16 @@ static int sun6i_dma_terminate_all(struct dma_chan *chan)
712 822
713 spin_lock_irqsave(&vchan->vc.lock, flags); 823 spin_lock_irqsave(&vchan->vc.lock, flags);
714 824
825 if (vchan->cyclic) {
826 vchan->cyclic = false;
827 if (pchan && pchan->desc) {
828 struct virt_dma_desc *vd = &pchan->desc->vd;
829 struct virt_dma_chan *vc = &vchan->vc;
830
831 list_add_tail(&vd->node, &vc->desc_completed);
832 }
833 }
834
715 vchan_get_all_descriptors(&vchan->vc, &head); 835 vchan_get_all_descriptors(&vchan->vc, &head);
716 836
717 if (pchan) { 837 if (pchan) {
@@ -759,7 +879,7 @@ static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan,
759 } else if (!pchan || !pchan->desc) { 879 } else if (!pchan || !pchan->desc) {
760 bytes = 0; 880 bytes = 0;
761 } else { 881 } else {
762 bytes = readl(pchan->base + DMA_CHAN_CUR_CNT); 882 bytes = sun6i_get_chan_size(pchan);
763 } 883 }
764 884
765 spin_unlock_irqrestore(&vchan->vc.lock, flags); 885 spin_unlock_irqrestore(&vchan->vc.lock, flags);
@@ -963,6 +1083,7 @@ static int sun6i_dma_probe(struct platform_device *pdev)
963 dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask); 1083 dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask);
964 dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask); 1084 dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask);
965 dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask); 1085 dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask);
1086 dma_cap_set(DMA_CYCLIC, sdc->slave.cap_mask);
966 1087
967 INIT_LIST_HEAD(&sdc->slave.channels); 1088 INIT_LIST_HEAD(&sdc->slave.channels);
968 sdc->slave.device_free_chan_resources = sun6i_dma_free_chan_resources; 1089 sdc->slave.device_free_chan_resources = sun6i_dma_free_chan_resources;
@@ -970,6 +1091,7 @@ static int sun6i_dma_probe(struct platform_device *pdev)
970 sdc->slave.device_issue_pending = sun6i_dma_issue_pending; 1091 sdc->slave.device_issue_pending = sun6i_dma_issue_pending;
971 sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg; 1092 sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg;
972 sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; 1093 sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy;
1094 sdc->slave.device_prep_dma_cyclic = sun6i_dma_prep_dma_cyclic;
973 sdc->slave.copy_align = DMAENGINE_ALIGN_4_BYTES; 1095 sdc->slave.copy_align = DMAENGINE_ALIGN_4_BYTES;
974 sdc->slave.device_config = sun6i_dma_config; 1096 sdc->slave.device_config = sun6i_dma_config;
975 sdc->slave.device_pause = sun6i_dma_pause; 1097 sdc->slave.device_pause = sun6i_dma_pause;