author	Jonas Aaberg <jonas.aberg@stericsson.com>	2010-08-09 08:08:26 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2010-09-22 17:53:45 -0400
commit	aa182ae2621877e0c111922696c84c538b82ad14 (patch)
tree	25a9ad5a841b63c99aae9b80e58da5a9b44498da /drivers/dma/ste_dma40.c
parent	3ae0267fd569c2007235fb80cfe3b4a4c54c8f4b (diff)
DMAENGINE: ste_dma40: added support for link jobs in hw
If a new job is added on a physical channel that already has a job, the
new job is linked in hw to the old job instead of queueing up the jobs.

Signed-off-by: Jonas Aaberg <jonas.aberg@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
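The mechanism, in a minimal standalone sketch (plain C, not driver code): each job is a chain of link items whose link field holds the physical address of the next item, so appending a job amounts to patching the tail link of the previous chain rather than waiting for the channel to go idle. struct lli, fake_phys() and hw_link() below are hypothetical stand-ins for the driver's d40_phy_lli, virt_to_phys() and the SSLNK/SDLNK register writes.

	#include <stdio.h>
	#include <stdint.h>
	#include <inttypes.h>
	#include <stddef.h>

	/* Hypothetical stand-in for the driver's d40_phy_lli */
	struct lli {
		uintptr_t reg_lnk;	/* "physical" address of next item, 0 = end of job */
	};

	/* Stand-in for virt_to_phys(): identity-map virtual to "physical" */
	static uintptr_t fake_phys(const struct lli *v)
	{
		return (uintptr_t)v;
	}

	/*
	 * The linking step: instead of queueing the new job, patch the link
	 * field of the previous job's last item so the controller runs
	 * straight into the new chain.
	 */
	static void hw_link(struct lli *prev, size_t prev_len, const struct lli *next)
	{
		prev[prev_len - 1].reg_lnk = fake_phys(next);
	}

	int main(void)
	{
		struct lli job1[2] = { { 0 }, { 0 } };
		struct lli job2[1] = { { 0 } };

		job1[0].reg_lnk = fake_phys(&job1[1]);	/* intra-job link */
		hw_link(job1, 2, job2);			/* job2 now continues job1 */

		printf("job1 tail -> %#" PRIxPTR " (job2 head = %#" PRIxPTR ")\n",
		       job1[1].reg_lnk, fake_phys(job2));
		return 0;
	}

Because the controller follows the patched link on its own, d40_tx_submit() below only has to pause the channel while the links are rewritten, instead of leaving the next job on a software queue until the current one completes.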
Diffstat (limited to 'drivers/dma/ste_dma40.c')
-rw-r--r--	drivers/dma/ste_dma40.c	314
1 file changed, 202 insertions(+), 112 deletions(-)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index c042103d7c0d..b8987e791055 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -92,6 +92,8 @@ struct d40_lli_pool {
  * @node: List entry.
  * @dir: The transfer direction of this job.
  * @is_in_client_list: true if the client owns this descriptor.
+ * @is_hw_linked: true if this job will automatically be continued for
+ * the previous one.
  *
  * This descriptor is used for both logical and physical transfers.
  */
@@ -112,6 +114,7 @@ struct d40_desc {
 
 	enum dma_data_direction dir;
 	bool is_in_client_list;
+	bool is_hw_linked;
 };
 
 /**
@@ -340,9 +343,6 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d,
 					      align);
 		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
 					      align);
-
-		d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
-		d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
 	}
 
 	return 0;
@@ -357,22 +357,6 @@ static void d40_pool_lli_free(struct d40_desc *d40d)
 	d40d->lli_log.dst = NULL;
 	d40d->lli_phy.src = NULL;
 	d40d->lli_phy.dst = NULL;
-	d40d->lli_phy.src_addr = 0;
-	d40d->lli_phy.dst_addr = 0;
-}
-
-static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
-				      struct d40_desc *desc)
-{
-	dma_cookie_t cookie = d40c->chan.cookie;
-
-	if (++cookie < 0)
-		cookie = 1;
-
-	d40c->chan.cookie = cookie;
-	desc->txd.cookie = cookie;
-
-	return cookie;
-}
 }
 
 static void d40_desc_remove(struct d40_desc *d40d)
@@ -443,6 +427,18 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
 	return d;
 }
 
+static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
+{
+	struct d40_desc *d;
+
+	if (list_empty(&d40c->queue))
+		return NULL;
+	list_for_each_entry(d, &d40c->queue, node)
+		if (list_is_last(&d->node, &d40c->queue))
+			break;
+	return d;
+}
+
 /* Support functions for logical channels */
 
 static int d40_lcla_id_get(struct d40_chan *d40c)
@@ -729,6 +725,161 @@ static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
 	d40d->lli_count += d40d->lli_tx_len;
 }
 
+static u32 d40_residue(struct d40_chan *d40c)
+{
+	u32 num_elt;
+
+	if (d40c->log_num != D40_PHY_CHAN)
+		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
+			>> D40_MEM_LCSP2_ECNT_POS;
+	else
+		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
+				 d40c->phy_chan->num * D40_DREG_PCDELTA +
+				 D40_CHAN_REG_SDELT) &
+			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
+			D40_SREG_ELEM_PHY_ECNT_POS;
+	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
+}
+
+static bool d40_tx_is_linked(struct d40_chan *d40c)
+{
+	bool is_link;
+
+	if (d40c->log_num != D40_PHY_CHAN)
+		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
+	else
+		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+				d40c->phy_chan->num * D40_DREG_PCDELTA +
+				D40_CHAN_REG_SDLNK) &
+			D40_SREG_LNK_PHYS_LNK_MASK;
+	return is_link;
+}
+
+static int d40_pause(struct dma_chan *chan)
+{
+	struct d40_chan *d40c =
+		container_of(chan, struct d40_chan, chan);
+	int res = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&d40c->lock, flags);
+
+	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+	if (res == 0) {
+		if (d40c->log_num != D40_PHY_CHAN) {
+			d40_config_set_event(d40c, false);
+			/* Resume the other logical channels if any */
+			if (d40_chan_has_events(d40c))
+				res = d40_channel_execute_command(d40c,
+								  D40_DMA_RUN);
+		}
+	}
+
+	spin_unlock_irqrestore(&d40c->lock, flags);
+	return res;
+}
+
+static int d40_resume(struct dma_chan *chan)
+{
+	struct d40_chan *d40c =
+		container_of(chan, struct d40_chan, chan);
+	int res = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&d40c->lock, flags);
+
+	if (d40c->base->rev == 0)
+		if (d40c->log_num != D40_PHY_CHAN) {
+			res = d40_channel_execute_command(d40c,
+							  D40_DMA_SUSPEND_REQ);
+			goto no_suspend;
+		}
+
+	/* If bytes left to transfer or linked tx resume job */
+	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
+
+		if (d40c->log_num != D40_PHY_CHAN)
+			d40_config_set_event(d40c, true);
+
+		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
+	}
+
+no_suspend:
+	spin_unlock_irqrestore(&d40c->lock, flags);
+	return res;
+}
+
+static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+	/* TODO: Write */
+}
+
+static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+	struct d40_desc *d40d_prev = NULL;
+	int i;
+	u32 val;
+
+	if (!list_empty(&d40c->queue))
+		d40d_prev = d40_last_queued(d40c);
+	else if (!list_empty(&d40c->active))
+		d40d_prev = d40_first_active_get(d40c);
+
+	if (!d40d_prev)
+		return;
+
+	/* Here we try to join this job with previous jobs */
+	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+		    d40c->phy_chan->num * D40_DREG_PCDELTA +
+		    D40_CHAN_REG_SSLNK);
+
+	/* Figure out which link we're currently transmitting */
+	for (i = 0; i < d40d_prev->lli_len; i++)
+		if (val == d40d_prev->lli_phy.src[i].reg_lnk)
+			break;
+
+	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+		    d40c->phy_chan->num * D40_DREG_PCDELTA +
+		    D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;
+
+	if (i == (d40d_prev->lli_len - 1) && val > 0) {
+		/* Change the current one */
+		writel(virt_to_phys(d40d->lli_phy.src),
+		       d40c->base->virtbase + D40_DREG_PCBASE +
+		       d40c->phy_chan->num * D40_DREG_PCDELTA +
+		       D40_CHAN_REG_SSLNK);
+		writel(virt_to_phys(d40d->lli_phy.dst),
+		       d40c->base->virtbase + D40_DREG_PCBASE +
+		       d40c->phy_chan->num * D40_DREG_PCDELTA +
+		       D40_CHAN_REG_SDLNK);
+
+		d40d->is_hw_linked = true;
+
+	} else if (i < d40d_prev->lli_len) {
+		(void) dma_unmap_single(d40c->base->dev,
+					virt_to_phys(d40d_prev->lli_phy.src),
+					d40d_prev->lli_pool.size,
+					DMA_TO_DEVICE);
+
+		/* Keep the settings */
+		val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk &
+			~D40_SREG_LNK_PHYS_LNK_MASK;
+		d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
+			val | virt_to_phys(d40d->lli_phy.src);
+
+		val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk &
+			~D40_SREG_LNK_PHYS_LNK_MASK;
+		d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk =
+			val | virt_to_phys(d40d->lli_phy.dst);
+
+		(void) dma_map_single(d40c->base->dev,
+				      d40d_prev->lli_phy.src,
+				      d40d_prev->lli_pool.size,
+				      DMA_TO_DEVICE);
+		d40d->is_hw_linked = true;
+	}
+}
+
 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct d40_chan *d40c = container_of(tx->chan,
@@ -737,14 +888,28 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
 	unsigned long flags;
 
+	(void) d40_pause(&d40c->chan);
+
 	spin_lock_irqsave(&d40c->lock, flags);
 
-	tx->cookie = d40_assign_cookie(d40c, d40d);
+	d40c->chan.cookie++;
+
+	if (d40c->chan.cookie < 0)
+		d40c->chan.cookie = 1;
+
+	d40d->txd.cookie = d40c->chan.cookie;
+
+	if (d40c->log_num == D40_PHY_CHAN)
+		d40_tx_submit_phy(d40c, d40d);
+	else
+		d40_tx_submit_log(d40c, d40d);
 
 	d40_desc_queue(d40c, d40d);
 
 	spin_unlock_irqrestore(&d40c->lock, flags);
 
+	(void) d40_resume(&d40c->chan);
+
 	return tx->cookie;
 }
 
@@ -784,14 +949,20 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
 		/* Add to active queue */
 		d40_desc_submit(d40c, d40d);
 
-		/* Initiate DMA job */
-		d40_desc_load(d40c, d40d);
+		/*
+		 * If this job is already linked in hw,
+		 * do not submit it.
+		 */
+		if (!d40d->is_hw_linked) {
+			/* Initiate DMA job */
+			d40_desc_load(d40c, d40d);
 
-		/* Start dma job */
-		err = d40_start(d40c);
+			/* Start dma job */
+			err = d40_start(d40c);
 
-		if (err)
-			return NULL;
+			if (err)
+				return NULL;
+		}
 	}
 
 	return d40d;
@@ -1341,30 +1512,6 @@ static int d40_free_dma(struct d40_chan *d40c)
 	return 0;
 }
 
-static int d40_pause(struct dma_chan *chan)
-{
-	struct d40_chan *d40c =
-		container_of(chan, struct d40_chan, chan);
-	int res;
-	unsigned long flags;
-
-	spin_lock_irqsave(&d40c->lock, flags);
-
-	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
-	if (res == 0) {
-		if (d40c->log_num != D40_PHY_CHAN) {
-			d40_config_set_event(d40c, false);
-			/* Resume the other logical channels if any */
-			if (d40_chan_has_events(d40c))
-				res = d40_channel_execute_command(d40c,
-								  D40_DMA_RUN);
-		}
-	}
-
-	spin_unlock_irqrestore(&d40c->lock, flags);
-	return res;
-}
-
 static bool d40_is_paused(struct d40_chan *d40c)
 {
 	bool is_paused = false;
@@ -1413,64 +1560,6 @@ _exit:
 }
 
 
-static bool d40_tx_is_linked(struct d40_chan *d40c)
-{
-	bool is_link;
-
-	if (d40c->log_num != D40_PHY_CHAN)
-		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
-	else
-		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
-				d40c->phy_chan->num * D40_DREG_PCDELTA +
-				D40_CHAN_REG_SDLNK) &
-			D40_SREG_LNK_PHYS_LNK_MASK;
-	return is_link;
-}
-
-static u32 d40_residue(struct d40_chan *d40c)
-{
-	u32 num_elt;
-
-	if (d40c->log_num != D40_PHY_CHAN)
-		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
-			>> D40_MEM_LCSP2_ECNT_POS;
-	else
-		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
-				 d40c->phy_chan->num * D40_DREG_PCDELTA +
-				 D40_CHAN_REG_SDELT) &
-			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
-			D40_SREG_ELEM_PHY_ECNT_POS;
-	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
-}
-
-static int d40_resume(struct dma_chan *chan)
-{
-	struct d40_chan *d40c =
-		container_of(chan, struct d40_chan, chan);
-	int res = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&d40c->lock, flags);
-
-	if (d40c->base->rev == 0)
-		if (d40c->log_num != D40_PHY_CHAN) {
-			res = d40_channel_execute_command(d40c,
-							  D40_DMA_SUSPEND_REQ);
-			goto no_suspend;
-		}
-
-	/* If bytes left to transfer or linked tx resume job */
-	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
-		if (d40c->log_num != D40_PHY_CHAN)
-			d40_config_set_event(d40c, true);
-		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
-	}
-
-no_suspend:
-	spin_unlock_irqrestore(&d40c->lock, flags);
-	return res;
-}
-
 static u32 stedma40_residue(struct dma_chan *chan)
 {
 	struct d40_chan *d40c =
@@ -1607,7 +1696,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 				       sgl_len,
 				       0,
 				       d40d->lli_phy.src,
-				       d40d->lli_phy.src_addr,
+				       virt_to_phys(d40d->lli_phy.src),
 				       d40c->src_def_cfg,
 				       d40c->dma_cfg.src_info.data_width,
 				       d40c->dma_cfg.src_info.psize);
@@ -1619,7 +1708,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 				       sgl_len,
 				       0,
 				       d40d->lli_phy.dst,
-				       d40d->lli_phy.dst_addr,
+				       virt_to_phys(d40d->lli_phy.dst),
 				       d40c->dst_def_cfg,
 				       d40c->dma_cfg.dst_info.data_width,
 				       d40c->dma_cfg.dst_info.psize);
@@ -1679,6 +1768,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 	 * use default configuration (memcpy)
 	 */
 	if (d40c->dma_cfg.channel_type == 0) {
+
 		err = d40_config_memcpy(d40c);
 		if (err) {
 			dev_err(&d40c->chan.dev->device,
@@ -1957,7 +2047,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
 			sgl_len,
 			src_dev_addr,
 			d40d->lli_phy.src,
-			d40d->lli_phy.src_addr,
+			virt_to_phys(d40d->lli_phy.src),
 			d40c->src_def_cfg,
 			d40c->dma_cfg.src_info.data_width,
 			d40c->dma_cfg.src_info.psize);
@@ -1968,7 +2058,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
 			sgl_len,
 			dst_dev_addr,
 			d40d->lli_phy.dst,
-			d40d->lli_phy.dst_addr,
+			virt_to_phys(d40d->lli_phy.dst),
 			d40c->dst_def_cfg,
 			d40c->dma_cfg.dst_info.data_width,
 			d40c->dma_cfg.dst_info.psize);
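With d40_assign_cookie() removed, d40_tx_submit() now open-codes the cookie update under the channel lock. A standalone sketch of that wrap-around (plain C; assign_cookie() is a hypothetical stand-in, not the dmaengine API): cookies are signed, and zero and negative values stay reserved, so the counter skips back to 1 when the increment wraps.

	#include <stdio.h>

	typedef int dma_cookie_t;	/* mirrors the kernel's signed cookie type */

	/* Sketch of the open-coded sequence in d40_tx_submit() */
	static dma_cookie_t assign_cookie(dma_cookie_t *chan_cookie)
	{
		(*chan_cookie)++;
		if (*chan_cookie < 0)	/* wrapped: 0 and negatives are reserved */
			*chan_cookie = 1;
		return *chan_cookie;
	}

	int main(void)
	{
		dma_cookie_t chan_cookie = -3;	/* pretend the counter has wrapped */

		printf("cookie after wrap: %d\n", assign_cookie(&chan_cookie)); /* 1 */
		printf("next cookie:       %d\n", assign_cookie(&chan_cookie)); /* 2 */
		return 0;
	}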