about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorPeter Ujfalusi <peter.ujfalusi@ti.com>2015-10-16 03:18:01 -0400
committerVinod Koul <vinod.koul@intel.com>2015-10-26 21:22:44 -0400
commit34cf30111cfccd18e1ccf2456f72dff6d42bd853 (patch)
treea7a2300e3253733095bcb65ccee94f6abcfcae94
parentdf6694f80365a72700d4c68fcf61ef068f5b3c25 (diff)
dmaengine: edma: Simplify function parameter list for channel operations
Instead of passing a pointer to struct edma_cc and the channel number, pass only the pointer to the edma_chan structure for the given channel. This struct contains all the information needed by the functions, and the use of this makes it obvious that most of the sanity checks can be removed from the driver.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--drivers/dma/edma.c396
1 file changed, 123 insertions(+), 273 deletions(-)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index c0165e3d3396..a64befecf477 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -391,17 +391,19 @@ static inline void clear_bits(int offset, int len, unsigned long *p)
391 clear_bit(offset + (len - 1), p); 391 clear_bit(offset + (len - 1), p);
392} 392}
393 393
394static void edma_map_dmach_to_queue(struct edma_cc *ecc, unsigned ch_no, 394static void edma_map_dmach_to_queue(struct edma_chan *echan,
395 enum dma_event_q queue_no) 395 enum dma_event_q queue_no)
396{ 396{
397 int bit = (ch_no & 0x7) * 4; 397 struct edma_cc *ecc = echan->ecc;
398 int channel = EDMA_CHAN_SLOT(echan->ch_num);
399 int bit = (channel & 0x7) * 4;
398 400
399 /* default to low priority queue */ 401 /* default to low priority queue */
400 if (queue_no == EVENTQ_DEFAULT) 402 if (queue_no == EVENTQ_DEFAULT)
401 queue_no = ecc->default_queue; 403 queue_no = ecc->default_queue;
402 404
403 queue_no &= 7; 405 queue_no &= 7;
404 edma_modify_array(ecc, EDMA_DMAQNUM, (ch_no >> 3), ~(0x7 << bit), 406 edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
405 queue_no << bit); 407 queue_no << bit);
406} 408}
407 409
@@ -413,10 +415,12 @@ static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
413 edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit)); 415 edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
414} 416}
415 417
416static void edma_set_chmap(struct edma_cc *ecc, int channel, int slot) 418static void edma_set_chmap(struct edma_chan *echan, int slot)
417{ 419{
420 struct edma_cc *ecc = echan->ecc;
421 int channel = EDMA_CHAN_SLOT(echan->ch_num);
422
418 if (ecc->chmap_exist) { 423 if (ecc->chmap_exist) {
419 channel = EDMA_CHAN_SLOT(channel);
420 slot = EDMA_CHAN_SLOT(slot); 424 slot = EDMA_CHAN_SLOT(slot);
421 edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5)); 425 edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
422 } 426 }
@@ -476,18 +480,19 @@ static int prepare_unused_channel_list(struct device *dev, void *data)
476 return 0; 480 return 0;
477} 481}
478 482
479static void edma_setup_interrupt(struct edma_cc *ecc, unsigned lch, bool enable) 483static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
480{ 484{
481 lch = EDMA_CHAN_SLOT(lch); 485 struct edma_cc *ecc = echan->ecc;
486 int channel = EDMA_CHAN_SLOT(echan->ch_num);
482 487
483 if (enable) { 488 if (enable) {
484 edma_shadow0_write_array(ecc, SH_ICR, lch >> 5, 489 edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
485 BIT(lch & 0x1f)); 490 BIT(channel & 0x1f));
486 edma_shadow0_write_array(ecc, SH_IESR, lch >> 5, 491 edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
487 BIT(lch & 0x1f)); 492 BIT(channel & 0x1f));
488 } else { 493 } else {
489 edma_shadow0_write_array(ecc, SH_IECR, lch >> 5, 494 edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
490 BIT(lch & 0x1f)); 495 BIT(channel & 0x1f));
491 } 496 }
492} 497}
493 498
@@ -613,40 +618,25 @@ static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
613 return edma_read(ecc, offs); 618 return edma_read(ecc, offs);
614} 619}
615 620
616/*-----------------------------------------------------------------------*/ 621/*
617/**
618 * edma_start - start dma on a channel
619 * @ecc: pointer to edma_cc struct
620 * @channel: channel being activated
621 *
622 * Channels with event associations will be triggered by their hardware 622 * Channels with event associations will be triggered by their hardware
623 * events, and channels without such associations will be triggered by 623 * events, and channels without such associations will be triggered by
624 * software. (At this writing there is no interface for using software 624 * software. (At this writing there is no interface for using software
625 * triggers except with channels that don't support hardware triggers.) 625 * triggers except with channels that don't support hardware triggers.)
626 *
627 * Returns zero on success, else negative errno.
628 */ 626 */
629static int edma_start(struct edma_cc *ecc, unsigned channel) 627static void edma_start(struct edma_chan *echan)
630{ 628{
631 if (ecc->id != EDMA_CTLR(channel)) { 629 struct edma_cc *ecc = echan->ecc;
632 dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__, 630 int channel = EDMA_CHAN_SLOT(echan->ch_num);
633 ecc->id, EDMA_CTLR(channel)); 631 int j = (channel >> 5);
634 return -EINVAL; 632 unsigned int mask = BIT(channel & 0x1f);
635 }
636 channel = EDMA_CHAN_SLOT(channel);
637
638 if (channel < ecc->num_channels) {
639 int j = channel >> 5;
640 unsigned int mask = BIT(channel & 0x1f);
641 633
634 if (test_bit(channel, ecc->channel_unused)) {
642 /* EDMA channels without event association */ 635 /* EDMA channels without event association */
643 if (test_bit(channel, ecc->channel_unused)) { 636 dev_dbg(ecc->dev, "ESR%d %08x\n", j,
644 dev_dbg(ecc->dev, "ESR%d %08x\n", j, 637 edma_shadow0_read_array(ecc, SH_ESR, j));
645 edma_shadow0_read_array(ecc, SH_ESR, j)); 638 edma_shadow0_write_array(ecc, SH_ESR, j, mask);
646 edma_shadow0_write_array(ecc, SH_ESR, j, mask); 639 } else {
647 return 0;
648 }
649
650 /* EDMA channel with event association */ 640 /* EDMA channel with event association */
651 dev_dbg(ecc->dev, "ER%d %08x\n", j, 641 dev_dbg(ecc->dev, "ER%d %08x\n", j,
652 edma_shadow0_read_array(ecc, SH_ER, j)); 642 edma_shadow0_read_array(ecc, SH_ER, j));
@@ -658,164 +648,86 @@ static int edma_start(struct edma_cc *ecc, unsigned channel)
658 edma_shadow0_write_array(ecc, SH_EESR, j, mask); 648 edma_shadow0_write_array(ecc, SH_EESR, j, mask);
659 dev_dbg(ecc->dev, "EER%d %08x\n", j, 649 dev_dbg(ecc->dev, "EER%d %08x\n", j,
660 edma_shadow0_read_array(ecc, SH_EER, j)); 650 edma_shadow0_read_array(ecc, SH_EER, j));
661 return 0;
662 } 651 }
663
664 return -EINVAL;
665} 652}
666 653
667/** 654static void edma_stop(struct edma_chan *echan)
668 * edma_stop - stops dma on the channel passed
669 * @ecc: pointer to edma_cc struct
670 * @channel: channel being deactivated
671 *
672 * Any active transfer is paused and all pending hardware events are cleared.
673 * The current transfer may not be resumed, and the channel's Parameter RAM
674 * should be reinitialized before being reused.
675 */
676static void edma_stop(struct edma_cc *ecc, unsigned channel)
677{ 655{
678 if (ecc->id != EDMA_CTLR(channel)) { 656 struct edma_cc *ecc = echan->ecc;
679 dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__, 657 int channel = EDMA_CHAN_SLOT(echan->ch_num);
680 ecc->id, EDMA_CTLR(channel)); 658 int j = (channel >> 5);
681 return; 659 unsigned int mask = BIT(channel & 0x1f);
682 }
683 channel = EDMA_CHAN_SLOT(channel);
684 660
685 if (channel < ecc->num_channels) { 661 edma_shadow0_write_array(ecc, SH_EECR, j, mask);
686 int j = channel >> 5; 662 edma_shadow0_write_array(ecc, SH_ECR, j, mask);
687 unsigned int mask = BIT(channel & 0x1f); 663 edma_shadow0_write_array(ecc, SH_SECR, j, mask);
664 edma_write_array(ecc, EDMA_EMCR, j, mask);
688 665
689 edma_shadow0_write_array(ecc, SH_EECR, j, mask); 666 /* clear possibly pending completion interrupt */
690 edma_shadow0_write_array(ecc, SH_ECR, j, mask); 667 edma_shadow0_write_array(ecc, SH_ICR, j, mask);
691 edma_shadow0_write_array(ecc, SH_SECR, j, mask);
692 edma_write_array(ecc, EDMA_EMCR, j, mask);
693 668
694 /* clear possibly pending completion interrupt */ 669 dev_dbg(ecc->dev, "EER%d %08x\n", j,
695 edma_shadow0_write_array(ecc, SH_ICR, j, mask); 670 edma_shadow0_read_array(ecc, SH_EER, j));
696 671
697 dev_dbg(ecc->dev, "EER%d %08x\n", j, 672 /* REVISIT: consider guarding against inappropriate event
698 edma_shadow0_read_array(ecc, SH_EER, j)); 673 * chaining by overwriting with dummy_paramset.
699 674 */
700 /* REVISIT: consider guarding against inappropriate event
701 * chaining by overwriting with dummy_paramset.
702 */
703 }
704} 675}
705 676
706/* 677/*
707 * Temporarily disable EDMA hardware events on the specified channel, 678 * Temporarily disable EDMA hardware events on the specified channel,
708 * preventing them from triggering new transfers 679 * preventing them from triggering new transfers
709 */ 680 */
710static void edma_pause(struct edma_cc *ecc, unsigned channel) 681static void edma_pause(struct edma_chan *echan)
711{ 682{
712 if (ecc->id != EDMA_CTLR(channel)) { 683 int channel = EDMA_CHAN_SLOT(echan->ch_num);
713 dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__, 684 unsigned int mask = BIT(channel & 0x1f);
714 ecc->id, EDMA_CTLR(channel));
715 return;
716 }
717 channel = EDMA_CHAN_SLOT(channel);
718
719 if (channel < ecc->num_channels) {
720 unsigned int mask = BIT(channel & 0x1f);
721 685
722 edma_shadow0_write_array(ecc, SH_EECR, channel >> 5, mask); 686 edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
723 }
724} 687}
725 688
726/* Re-enable EDMA hardware events on the specified channel. */ 689/* Re-enable EDMA hardware events on the specified channel. */
727static void edma_resume(struct edma_cc *ecc, unsigned channel) 690static void edma_resume(struct edma_chan *echan)
728{ 691{
729 if (ecc->id != EDMA_CTLR(channel)) { 692 int channel = EDMA_CHAN_SLOT(echan->ch_num);
730 dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__, 693 unsigned int mask = BIT(channel & 0x1f);
731 ecc->id, EDMA_CTLR(channel));
732 return;
733 }
734 channel = EDMA_CHAN_SLOT(channel);
735
736 if (channel < ecc->num_channels) {
737 unsigned int mask = BIT(channel & 0x1f);
738 694
739 edma_shadow0_write_array(ecc, SH_EESR, channel >> 5, mask); 695 edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
740 }
741} 696}
742 697
743static int edma_trigger_channel(struct edma_cc *ecc, unsigned channel) 698static void edma_trigger_channel(struct edma_chan *echan)
744{ 699{
745 unsigned int mask; 700 struct edma_cc *ecc = echan->ecc;
746 701 int channel = EDMA_CHAN_SLOT(echan->ch_num);
747 if (ecc->id != EDMA_CTLR(channel)) { 702 unsigned int mask = BIT(channel & 0x1f);
748 dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
749 ecc->id, EDMA_CTLR(channel));
750 return -EINVAL;
751 }
752 channel = EDMA_CHAN_SLOT(channel);
753 mask = BIT(channel & 0x1f);
754 703
755 edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask); 704 edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);
756 705
757 dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5), 706 dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
758 edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5))); 707 edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
759 return 0;
760} 708}
761 709
762static void edma_clean_channel(struct edma_cc *ecc, unsigned channel) 710static void edma_clean_channel(struct edma_chan *echan)
763{ 711{
764 if (ecc->id != EDMA_CTLR(channel)) { 712 struct edma_cc *ecc = echan->ecc;
765 dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__, 713 int channel = EDMA_CHAN_SLOT(echan->ch_num);
766 ecc->id, EDMA_CTLR(channel)); 714 int j = (channel >> 5);
767 return; 715 unsigned int mask = BIT(channel & 0x1f);
768 }
769 channel = EDMA_CHAN_SLOT(channel);
770
771 if (channel < ecc->num_channels) {
772 int j = (channel >> 5);
773 unsigned int mask = BIT(channel & 0x1f);
774 716
775 dev_dbg(ecc->dev, "EMR%d %08x\n", j, 717 dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
776 edma_read_array(ecc, EDMA_EMR, j)); 718 edma_shadow0_write_array(ecc, SH_ECR, j, mask);
777 edma_shadow0_write_array(ecc, SH_ECR, j, mask); 719 /* Clear the corresponding EMR bits */
778 /* Clear the corresponding EMR bits */ 720 edma_write_array(ecc, EDMA_EMCR, j, mask);
779 edma_write_array(ecc, EDMA_EMCR, j, mask); 721 /* Clear any SER */
780 /* Clear any SER */ 722 edma_shadow0_write_array(ecc, SH_SECR, j, mask);
781 edma_shadow0_write_array(ecc, SH_SECR, j, mask); 723 edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
782 edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
783 }
784} 724}
785 725
786/** 726static int edma_alloc_channel(struct edma_chan *echan,
787 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
788 * @ecc: pointer to edma_cc struct
789 * @channel: specific channel to allocate; negative for "any unmapped channel"
790 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
791 * Controller (TC) executes requests using this channel. Use
792 * EVENTQ_DEFAULT unless you really need a high priority queue.
793 *
794 * This allocates a DMA channel and its associated parameter RAM slot.
795 * The parameter RAM is initialized to hold a dummy transfer.
796 *
797 * Normal use is to pass a specific channel number as @channel, to make
798 * use of hardware events mapped to that channel. When the channel will
799 * be used only for software triggering or event chaining, channels not
800 * mapped to hardware events (or mapped to unused events) are preferable.
801 *
802 * DMA transfers start from a channel using edma_start(), or by
803 * chaining. When the transfer described in that channel's parameter RAM
804 * slot completes, that slot's data may be reloaded through a link.
805 *
806 * DMA errors are only reported to the @callback associated with the
807 * channel driving that transfer, but transfer completion callbacks can
808 * be sent to another channel under control of the TCC field in
809 * the option word of the transfer's parameter RAM set. Drivers must not
810 * use DMA transfer completion callbacks for channels they did not allocate.
811 * (The same applies to TCC codes used in transfer chaining.)
812 *
813 * Returns the number of the channel, else negative errno.
814 */
815static int edma_alloc_channel(struct edma_cc *ecc, int channel,
816 enum dma_event_q eventq_no) 727 enum dma_event_q eventq_no)
817{ 728{
818 int ret = 0; 729 struct edma_cc *ecc = echan->ecc;
730 int channel = EDMA_CHAN_SLOT(echan->ch_num);
819 731
820 if (!ecc->unused_chan_list_done) { 732 if (!ecc->unused_chan_list_done) {
821 /* 733 /*
@@ -823,86 +735,40 @@ static int edma_alloc_channel(struct edma_cc *ecc, int channel,
823 * used and clear them in the unused list, making the rest 735 * used and clear them in the unused list, making the rest
824 * available for ARM usage. 736 * available for ARM usage.
825 */ 737 */
826 ret = bus_for_each_dev(&platform_bus_type, NULL, ecc, 738 int ret = bus_for_each_dev(&platform_bus_type, NULL, ecc,
827 prepare_unused_channel_list); 739 prepare_unused_channel_list);
828 if (ret < 0) 740 if (ret < 0)
829 return ret; 741 return ret;
830 742
831 ecc->unused_chan_list_done = true; 743 ecc->unused_chan_list_done = true;
832 } 744 }
833 745
834 if (channel >= 0) {
835 if (ecc->id != EDMA_CTLR(channel)) {
836 dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n",
837 __func__, ecc->id, EDMA_CTLR(channel));
838 return -EINVAL;
839 }
840 channel = EDMA_CHAN_SLOT(channel);
841 }
842
843 if (channel < 0) {
844 channel = find_next_bit(ecc->channel_unused, ecc->num_channels,
845 0);
846 if (channel == ecc->num_channels)
847 return -EBUSY;
848 } else if (channel >= ecc->num_channels) {
849 return -EINVAL;
850 }
851
852 /* ensure access through shadow region 0 */ 746 /* ensure access through shadow region 0 */
853 edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f)); 747 edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
854 748
855 /* ensure no events are pending */ 749 /* ensure no events are pending */
856 edma_stop(ecc, EDMA_CTLR_CHAN(ecc->id, channel)); 750 edma_stop(echan);
857 751
858 edma_setup_interrupt(ecc, EDMA_CTLR_CHAN(ecc->id, channel), true); 752 edma_setup_interrupt(echan, true);
859 753
860 edma_map_dmach_to_queue(ecc, channel, eventq_no); 754 edma_map_dmach_to_queue(echan, eventq_no);
861 755
862 return EDMA_CTLR_CHAN(ecc->id, channel); 756 return 0;
863} 757}
864 758
865/** 759static void edma_free_channel(struct edma_chan *echan)
866 * edma_free_channel - deallocate DMA channel
867 * @ecc: pointer to edma_cc struct
868 * @channel: dma channel returned from edma_alloc_channel()
869 *
870 * This deallocates the DMA channel and associated parameter RAM slot
871 * allocated by edma_alloc_channel().
872 *
873 * Callers are responsible for ensuring the channel is inactive, and
874 * will not be reactivated by linking, chaining, or software calls to
875 * edma_start().
876 */
877static void edma_free_channel(struct edma_cc *ecc, unsigned channel)
878{ 760{
879 if (ecc->id != EDMA_CTLR(channel)) { 761 /* ensure no events are pending */
880 dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__, 762 edma_stop(echan);
881 ecc->id, EDMA_CTLR(channel));
882 return;
883 }
884 channel = EDMA_CHAN_SLOT(channel);
885
886 if (channel >= ecc->num_channels)
887 return;
888
889 /* REVISIT should probably take out of shadow region 0 */ 763 /* REVISIT should probably take out of shadow region 0 */
890 edma_setup_interrupt(ecc, channel, false); 764 edma_setup_interrupt(echan, false);
891} 765}
892 766
893/* Move channel to a specific event queue */ 767/* Move channel to a specific event queue */
894static void edma_assign_channel_eventq(struct edma_cc *ecc, unsigned channel, 768static void edma_assign_channel_eventq(struct edma_chan *echan,
895 enum dma_event_q eventq_no) 769 enum dma_event_q eventq_no)
896{ 770{
897 if (ecc->id != EDMA_CTLR(channel)) { 771 struct edma_cc *ecc = echan->ecc;
898 dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
899 ecc->id, EDMA_CTLR(channel));
900 return;
901 }
902 channel = EDMA_CHAN_SLOT(channel);
903
904 if (channel >= ecc->num_channels)
905 return;
906 772
907 /* default to low priority queue */ 773 /* default to low priority queue */
908 if (eventq_no == EVENTQ_DEFAULT) 774 if (eventq_no == EVENTQ_DEFAULT)
@@ -910,7 +776,7 @@ static void edma_assign_channel_eventq(struct edma_cc *ecc, unsigned channel,
910 if (eventq_no >= ecc->num_tc) 776 if (eventq_no >= ecc->num_tc)
911 return; 777 return;
912 778
913 edma_map_dmach_to_queue(ecc, channel, eventq_no); 779 edma_map_dmach_to_queue(echan, eventq_no);
914} 780}
915 781
916static inline struct edma_cc *to_edma_cc(struct dma_device *d) 782static inline struct edma_cc *to_edma_cc(struct dma_device *d)
@@ -1011,19 +877,19 @@ static void edma_execute(struct edma_chan *echan)
1011 * transfers of MAX_NR_SG 877 * transfers of MAX_NR_SG
1012 */ 878 */
1013 dev_dbg(dev, "missed event on channel %d\n", echan->ch_num); 879 dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
1014 edma_clean_channel(ecc, echan->ch_num); 880 edma_clean_channel(echan);
1015 edma_stop(ecc, echan->ch_num); 881 edma_stop(echan);
1016 edma_start(ecc, echan->ch_num); 882 edma_start(echan);
1017 edma_trigger_channel(ecc, echan->ch_num); 883 edma_trigger_channel(echan);
1018 echan->missed = 0; 884 echan->missed = 0;
1019 } else if (edesc->processed <= MAX_NR_SG) { 885 } else if (edesc->processed <= MAX_NR_SG) {
1020 dev_dbg(dev, "first transfer starting on channel %d\n", 886 dev_dbg(dev, "first transfer starting on channel %d\n",
1021 echan->ch_num); 887 echan->ch_num);
1022 edma_start(ecc, echan->ch_num); 888 edma_start(echan);
1023 } else { 889 } else {
1024 dev_dbg(dev, "chan: %d: completed %d elements, resuming\n", 890 dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
1025 echan->ch_num, edesc->processed); 891 echan->ch_num, edesc->processed);
1026 edma_resume(ecc, echan->ch_num); 892 edma_resume(echan);
1027 } 893 }
1028} 894}
1029 895
@@ -1041,11 +907,10 @@ static int edma_terminate_all(struct dma_chan *chan)
1041 * echan->edesc is NULL and exit.) 907 * echan->edesc is NULL and exit.)
1042 */ 908 */
1043 if (echan->edesc) { 909 if (echan->edesc) {
1044 edma_stop(echan->ecc, echan->ch_num); 910 edma_stop(echan);
1045 /* Move the cyclic channel back to default queue */ 911 /* Move the cyclic channel back to default queue */
1046 if (echan->edesc->cyclic) 912 if (echan->edesc->cyclic)
1047 edma_assign_channel_eventq(echan->ecc, echan->ch_num, 913 edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
1048 EVENTQ_DEFAULT);
1049 /* 914 /*
1050 * free the running request descriptor 915 * free the running request descriptor
1051 * since it is not in any of the vdesc lists 916 * since it is not in any of the vdesc lists
@@ -1082,7 +947,7 @@ static int edma_dma_pause(struct dma_chan *chan)
1082 if (!echan->edesc) 947 if (!echan->edesc)
1083 return -EINVAL; 948 return -EINVAL;
1084 949
1085 edma_pause(echan->ecc, echan->ch_num); 950 edma_pause(echan);
1086 return 0; 951 return 0;
1087} 952}
1088 953
@@ -1090,7 +955,7 @@ static int edma_dma_resume(struct dma_chan *chan)
1090{ 955{
1091 struct edma_chan *echan = to_edma_chan(chan); 956 struct edma_chan *echan = to_edma_chan(chan);
1092 957
1093 edma_resume(echan->ecc, echan->ch_num); 958 edma_resume(echan);
1094 return 0; 959 return 0;
1095} 960}
1096 961
@@ -1548,14 +1413,13 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
1548 } 1413 }
1549 1414
1550 /* Place the cyclic channel to highest priority queue */ 1415 /* Place the cyclic channel to highest priority queue */
1551 edma_assign_channel_eventq(echan->ecc, echan->ch_num, EVENTQ_0); 1416 edma_assign_channel_eventq(echan, EVENTQ_0);
1552 1417
1553 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); 1418 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1554} 1419}
1555 1420
1556static void edma_completion_handler(struct edma_chan *echan) 1421static void edma_completion_handler(struct edma_chan *echan)
1557{ 1422{
1558 struct edma_cc *ecc = echan->ecc;
1559 struct device *dev = echan->vchan.chan.device->dev; 1423 struct device *dev = echan->vchan.chan.device->dev;
1560 struct edma_desc *edesc = echan->edesc; 1424 struct edma_desc *edesc = echan->edesc;
1561 1425
@@ -1569,7 +1433,7 @@ static void edma_completion_handler(struct edma_chan *echan)
1569 return; 1433 return;
1570 } else if (edesc->processed == edesc->pset_nr) { 1434 } else if (edesc->processed == edesc->pset_nr) {
1571 edesc->residue = 0; 1435 edesc->residue = 0;
1572 edma_stop(ecc, echan->ch_num); 1436 edma_stop(echan);
1573 vchan_cookie_complete(&edesc->vdesc); 1437 vchan_cookie_complete(&edesc->vdesc);
1574 echan->edesc = NULL; 1438 echan->edesc = NULL;
1575 1439
@@ -1579,7 +1443,7 @@ static void edma_completion_handler(struct edma_chan *echan)
1579 dev_dbg(dev, "Sub transfer completed on channel %d\n", 1443 dev_dbg(dev, "Sub transfer completed on channel %d\n",
1580 echan->ch_num); 1444 echan->ch_num);
1581 1445
1582 edma_pause(ecc, echan->ch_num); 1446 edma_pause(echan);
1583 1447
1584 /* Update statistics for tx_status */ 1448 /* Update statistics for tx_status */
1585 edesc->residue -= edesc->sg_len; 1449 edesc->residue -= edesc->sg_len;
@@ -1670,10 +1534,10 @@ static void edma_error_handler(struct edma_chan *echan)
1670 * missed, so its safe to issue it here. 1534 * missed, so its safe to issue it here.
1671 */ 1535 */
1672 dev_dbg(dev, "Missed event, TRIGGERING\n"); 1536 dev_dbg(dev, "Missed event, TRIGGERING\n");
1673 edma_clean_channel(ecc, echan->ch_num); 1537 edma_clean_channel(echan);
1674 edma_stop(ecc, echan->ch_num); 1538 edma_stop(echan);
1675 edma_start(ecc, echan->ch_num); 1539 edma_start(echan);
1676 edma_trigger_channel(ecc, echan->ch_num); 1540 edma_trigger_channel(echan);
1677 } 1541 }
1678 spin_unlock(&echan->vchan.lock); 1542 spin_unlock(&echan->vchan.lock);
1679} 1543}
@@ -1761,43 +1625,29 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
1761 struct edma_chan *echan = to_edma_chan(chan); 1625 struct edma_chan *echan = to_edma_chan(chan);
1762 struct device *dev = chan->device->dev; 1626 struct device *dev = chan->device->dev;
1763 int ret; 1627 int ret;
1764 int a_ch_num;
1765 LIST_HEAD(descs);
1766
1767 a_ch_num = edma_alloc_channel(echan->ecc, echan->ch_num, EVENTQ_DEFAULT);
1768
1769 if (a_ch_num < 0) {
1770 ret = -ENODEV;
1771 goto err_no_chan;
1772 }
1773 1628
1774 if (a_ch_num != echan->ch_num) { 1629 ret = edma_alloc_channel(echan, EVENTQ_DEFAULT);
1775 dev_err(dev, "failed to allocate requested channel %u:%u\n", 1630 if (ret)
1776 EDMA_CTLR(echan->ch_num), 1631 return ret;
1777 EDMA_CHAN_SLOT(echan->ch_num));
1778 ret = -ENODEV;
1779 goto err_wrong_chan;
1780 }
1781 1632
1782 echan->alloced = true;
1783 echan->slot[0] = edma_alloc_slot(echan->ecc, echan->ch_num); 1633 echan->slot[0] = edma_alloc_slot(echan->ecc, echan->ch_num);
1784 if (echan->slot[0] < 0) { 1634 if (echan->slot[0] < 0) {
1785 dev_err(dev, "Entry slot allocation failed for channel %u\n", 1635 dev_err(dev, "Entry slot allocation failed for channel %u\n",
1786 EDMA_CHAN_SLOT(echan->ch_num)); 1636 EDMA_CHAN_SLOT(echan->ch_num));
1787 goto err_wrong_chan; 1637 goto err_slot;
1788 } 1638 }
1789 1639
1790 /* Set up channel -> slot mapping for the entry slot */ 1640 /* Set up channel -> slot mapping for the entry slot */
1791 edma_set_chmap(echan->ecc, echan->ch_num, echan->slot[0]); 1641 edma_set_chmap(echan, echan->slot[0]);
1642 echan->alloced = true;
1792 1643
1793 dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num, 1644 dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
1794 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); 1645 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
1795 1646
1796 return 0; 1647 return 0;
1797 1648
1798err_wrong_chan: 1649err_slot:
1799 edma_free_channel(echan->ecc, a_ch_num); 1650 edma_free_channel(echan);
1800err_no_chan:
1801 return ret; 1651 return ret;
1802} 1652}
1803 1653
@@ -1808,7 +1658,7 @@ static void edma_free_chan_resources(struct dma_chan *chan)
1808 int i; 1658 int i;
1809 1659
1810 /* Terminate transfers */ 1660 /* Terminate transfers */
1811 edma_stop(echan->ecc, echan->ch_num); 1661 edma_stop(echan);
1812 1662
1813 vchan_free_chan_resources(&echan->vchan); 1663 vchan_free_chan_resources(&echan->vchan);
1814 1664
@@ -1821,11 +1671,11 @@ static void edma_free_chan_resources(struct dma_chan *chan)
1821 } 1671 }
1822 1672
1823 /* Set entry slot to the dummy slot */ 1673 /* Set entry slot to the dummy slot */
1824 edma_set_chmap(echan->ecc, echan->ch_num, echan->ecc->dummy_slot); 1674 edma_set_chmap(echan, echan->ecc->dummy_slot);
1825 1675
1826 /* Free EDMA channel */ 1676 /* Free EDMA channel */
1827 if (echan->alloced) { 1677 if (echan->alloced) {
1828 edma_free_channel(echan->ecc, echan->ch_num); 1678 edma_free_channel(echan);
1829 echan->alloced = false; 1679 echan->alloced = false;
1830 } 1680 }
1831 1681
@@ -2279,13 +2129,6 @@ static int edma_probe(struct platform_device *pdev)
2279 return ecc->dummy_slot; 2129 return ecc->dummy_slot;
2280 } 2130 }
2281 2131
2282 for (i = 0; i < ecc->num_channels; i++) {
2283 /* Assign all channels to the default queue */
2284 edma_map_dmach_to_queue(ecc, i, info->default_queue);
2285 /* Set entry slot to the dummy slot */
2286 edma_set_chmap(ecc, i, ecc->dummy_slot);
2287 }
2288
2289 queue_priority_mapping = info->queue_priority_mapping; 2132 queue_priority_mapping = info->queue_priority_mapping;
2290 2133
2291 /* Event queue priority mapping */ 2134 /* Event queue priority mapping */
@@ -2309,6 +2152,14 @@ static int edma_probe(struct platform_device *pdev)
2309 2152
2310 edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans); 2153 edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);
2311 2154
2155 for (i = 0; i < ecc->num_channels; i++) {
2156 /* Assign all channels to the default queue */
2157 edma_map_dmach_to_queue(&ecc->slave_chans[i],
2158 info->default_queue);
2159 /* Set entry slot to the dummy slot */
2160 edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
2161 }
2162
2312 ret = dma_async_device_register(&ecc->dma_slave); 2163 ret = dma_async_device_register(&ecc->dma_slave);
2313 if (ret) 2164 if (ret)
2314 goto err_reg1; 2165 goto err_reg1;
@@ -2360,11 +2211,10 @@ static int edma_pm_resume(struct device *dev)
2360 edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5, 2211 edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
2361 BIT(i & 0x1f)); 2212 BIT(i & 0x1f));
2362 2213
2363 edma_setup_interrupt(ecc, EDMA_CTLR_CHAN(ecc->id, i), 2214 edma_setup_interrupt(&echan[i], true);
2364 true);
2365 2215
2366 /* Set up channel -> slot mapping for the entry slot */ 2216 /* Set up channel -> slot mapping for the entry slot */
2367 edma_set_chmap(ecc, echan[i].ch_num, echan[i].slot[0]); 2217 edma_set_chmap(&echan[i], echan[i].slot[0]);
2368 } 2218 }
2369 } 2219 }
2370 2220