author		Maxime Ripard <maxime.ripard@free-electrons.com>	2014-11-17 08:42:20 -0500
committer	Vinod Koul <vinod.koul@intel.com>	2014-12-22 01:58:59 -0500
commit		db08425ebd51f3b4c73b0698ca3b0173ebd106be (patch)
tree		78508c09736c2a09349d94b42d52f7978cc12b82
parent		701c1edbb4dc895d018312a7e4e5f2c673bf155c (diff)
dmaengine: k3: Split device_control
Split the device_control callback of the Hisilicon K3 DMA driver to make use
of the newly introduced callbacks that will eventually be used to retrieve
slave capabilities.
Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--	drivers/dma/k3dma.c	| 197
1 file changed, 107 insertions(+), 90 deletions(-)
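For context on what the split means one level up: dmaengine consumers keep calling the generic wrappers, which after this series dispatch to the new per-operation callbacks instead of multiplexing a command through device_control. A minimal consumer-side sketch follows; it is not part of this patch, and the channel, FIFO address, width, and burst values are illustrative assumptions only.

#include <linux/dmaengine.h>

/* Hypothetical slave consumer; values are made up for illustration. */
static int example_setup_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
	};

	/* Reaches the driver's device_config callback (k3_dma_config below). */
	return dmaengine_slave_config(chan, &cfg);
}

static void example_stop(struct dma_chan *chan)
{
	/* Reaches device_pause / device_terminate_all for converted drivers. */
	dmaengine_pause(chan);
	dmaengine_terminate_all(chan);
}

On a kernel with this conversion, dmaengine_slave_config() on a channel from this driver ends up in k3_dma_config(), and dmaengine_pause()/dmaengine_terminate_all() end up in k3_dma_pause()/k3_dma_terminate_all().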
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index a1de14ab2c51..49be7f687c4c 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -441,7 +441,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
 	num = 0;
 
 	if (!c->ccfg) {
-		/* default is memtomem, without calling device_control */
+		/* default is memtomem, without calling device_config */
 		c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
 		c->ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
 		c->ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
@@ -523,112 +523,126 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
 	return vchan_tx_prep(&c->vc, &ds->vd, flags);
 }
 
-static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
+static int k3_dma_config(struct dma_chan *chan,
+			 struct dma_slave_config *cfg)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	u32 maxburst = 0, val = 0;
+	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+
+	if (cfg == NULL)
+		return -EINVAL;
+	c->dir = cfg->direction;
+	if (c->dir == DMA_DEV_TO_MEM) {
+		c->ccfg = CX_CFG_DSTINCR;
+		c->dev_addr = cfg->src_addr;
+		maxburst = cfg->src_maxburst;
+		width = cfg->src_addr_width;
+	} else if (c->dir == DMA_MEM_TO_DEV) {
+		c->ccfg = CX_CFG_SRCINCR;
+		c->dev_addr = cfg->dst_addr;
+		maxburst = cfg->dst_maxburst;
+		width = cfg->dst_addr_width;
+	}
+	switch (width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
+		val = __ffs(width);
+		break;
+	default:
+		val = 3;
+		break;
+	}
+	c->ccfg |= (val << 12) | (val << 16);
+
+	if ((maxburst == 0) || (maxburst > 16))
+		val = 16;
+	else
+		val = maxburst - 1;
+	c->ccfg |= (val << 20) | (val << 24);
+	c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
+
+	/* specific request line */
+	c->ccfg |= c->vc.chan.chan_id << 4;
+
+	return 0;
+}
+
+static int k3_dma_terminate_all(struct dma_chan *chan)
 {
 	struct k3_dma_chan *c = to_k3_chan(chan);
 	struct k3_dma_dev *d = to_k3_dma(chan->device);
-	struct dma_slave_config *cfg = (void *)arg;
 	struct k3_dma_phy *p = c->phy;
 	unsigned long flags;
-	u32 maxburst = 0, val = 0;
-	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
 	LIST_HEAD(head);
 
-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
-		if (cfg == NULL)
-			return -EINVAL;
-		c->dir = cfg->direction;
-		if (c->dir == DMA_DEV_TO_MEM) {
-			c->ccfg = CX_CFG_DSTINCR;
-			c->dev_addr = cfg->src_addr;
-			maxburst = cfg->src_maxburst;
-			width = cfg->src_addr_width;
-		} else if (c->dir == DMA_MEM_TO_DEV) {
-			c->ccfg = CX_CFG_SRCINCR;
-			c->dev_addr = cfg->dst_addr;
-			maxburst = cfg->dst_maxburst;
-			width = cfg->dst_addr_width;
-		}
-		switch (width) {
-		case DMA_SLAVE_BUSWIDTH_1_BYTE:
-		case DMA_SLAVE_BUSWIDTH_2_BYTES:
-		case DMA_SLAVE_BUSWIDTH_4_BYTES:
-		case DMA_SLAVE_BUSWIDTH_8_BYTES:
-			val = __ffs(width);
-			break;
-		default:
-			val = 3;
-			break;
-		}
-		c->ccfg |= (val << 12) | (val << 16);
+	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
 
-		if ((maxburst == 0) || (maxburst > 16))
-			val = 16;
-		else
-			val = maxburst - 1;
-		c->ccfg |= (val << 20) | (val << 24);
-		c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
+	/* Prevent this channel being scheduled */
+	spin_lock(&d->lock);
+	list_del_init(&c->node);
+	spin_unlock(&d->lock);
 
-		/* specific request line */
-		c->ccfg |= c->vc.chan.chan_id << 4;
-		break;
+	/* Clear the tx descriptor lists */
+	spin_lock_irqsave(&c->vc.lock, flags);
+	vchan_get_all_descriptors(&c->vc, &head);
+	if (p) {
+		/* vchan is assigned to a pchan - stop the channel */
+		k3_dma_terminate_chan(p, d);
+		c->phy = NULL;
+		p->vchan = NULL;
+		p->ds_run = p->ds_done = NULL;
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+	vchan_dma_desc_free_list(&c->vc, &head);
 
-	case DMA_TERMINATE_ALL:
-		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
+	return 0;
+}
 
-		/* Prevent this channel being scheduled */
-		spin_lock(&d->lock);
-		list_del_init(&c->node);
-		spin_unlock(&d->lock);
+static int k3_dma_pause(struct dma_chan *chan)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	struct k3_dma_dev *d = to_k3_dma(chan->device);
+	struct k3_dma_phy *p = c->phy;
 
-		/* Clear the tx descriptor lists */
-		spin_lock_irqsave(&c->vc.lock, flags);
-		vchan_get_all_descriptors(&c->vc, &head);
+	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
+	if (c->status == DMA_IN_PROGRESS) {
+		c->status = DMA_PAUSED;
 		if (p) {
-			/* vchan is assigned to a pchan - stop the channel */
-			k3_dma_terminate_chan(p, d);
-			c->phy = NULL;
-			p->vchan = NULL;
-			p->ds_run = p->ds_done = NULL;
+			k3_dma_pause_dma(p, false);
+		} else {
+			spin_lock(&d->lock);
+			list_del_init(&c->node);
+			spin_unlock(&d->lock);
 		}
-		spin_unlock_irqrestore(&c->vc.lock, flags);
-		vchan_dma_desc_free_list(&c->vc, &head);
-		break;
+	}
 
-	case DMA_PAUSE:
-		dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
-		if (c->status == DMA_IN_PROGRESS) {
-			c->status = DMA_PAUSED;
-			if (p) {
-				k3_dma_pause_dma(p, false);
-			} else {
-				spin_lock(&d->lock);
-				list_del_init(&c->node);
-				spin_unlock(&d->lock);
-			}
-		}
-		break;
+	return 0;
+}
 
-	case DMA_RESUME:
-		dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
-		spin_lock_irqsave(&c->vc.lock, flags);
-		if (c->status == DMA_PAUSED) {
-			c->status = DMA_IN_PROGRESS;
-			if (p) {
-				k3_dma_pause_dma(p, true);
-			} else if (!list_empty(&c->vc.desc_issued)) {
-				spin_lock(&d->lock);
-				list_add_tail(&c->node, &d->chan_pending);
-				spin_unlock(&d->lock);
-			}
+static int k3_dma_resume(struct dma_chan *chan)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	struct k3_dma_dev *d = to_k3_dma(chan->device);
+	struct k3_dma_phy *p = c->phy;
+	unsigned long flags;
+
+	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
+	spin_lock_irqsave(&c->vc.lock, flags);
+	if (c->status == DMA_PAUSED) {
+		c->status = DMA_IN_PROGRESS;
+		if (p) {
+			k3_dma_pause_dma(p, true);
+		} else if (!list_empty(&c->vc.desc_issued)) {
+			spin_lock(&d->lock);
+			list_add_tail(&c->node, &d->chan_pending);
+			spin_unlock(&d->lock);
 		}
-		spin_unlock_irqrestore(&c->vc.lock, flags);
-		break;
-	default:
-		return -ENXIO;
 	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
 	return 0;
 }
 
@@ -720,7 +734,10 @@ static int k3_dma_probe(struct platform_device *op)
 	d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
 	d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
 	d->slave.device_issue_pending = k3_dma_issue_pending;
-	d->slave.device_control = k3_dma_control;
+	d->slave.device_config = k3_dma_config;
+	d->slave.device_pause = k3_dma_pause;
+	d->slave.device_resume = k3_dma_resume;
+	d->slave.device_terminate_all = k3_dma_terminate_all;
 	d->slave.copy_align = DMA_ALIGN;
 
 	/* init virtual channel */
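With the four callbacks registered in k3_dma_probe() as above, the dmaengine core no longer has to multiplex a cmd/arg pair through device_control. A rough sketch of how the per-operation wrappers dispatch once drivers are converted; the shape is assumed from include/linux/dmaengine.h after the transition, not taken from this patch, and the transitional kernel also carried a fallback to the old device_control callback.

/* Assumed post-conversion wrapper shape (sketch, not from this patch). */
static inline int dmaengine_pause(struct dma_chan *chan)
{
	if (chan->device->device_pause)
		return chan->device->device_pause(chan);

	return -ENOSYS;
}

static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -ENOSYS;
}

Each operation now has its own typed callback, so the core can also report which operations a channel actually supports instead of probing an opaque command switch.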