-rw-r--r--  drivers/dma/amba-pl08x.c | 79
 1 file changed, 32 insertions(+), 47 deletions(-)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 54e3eb0b3723..e04ca0b01f98 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -168,7 +168,6 @@ struct pl08x_sg {
  * @tx: async tx descriptor
  * @node: node for txd list for channels
  * @dsg_list: list of children sg's
- * @direction: direction of transfer
  * @llis_bus: DMA memory address (physical) start for the LLIs
  * @llis_va: virtual memory address start for the LLIs
  * @cctl: control reg values for current txd
@@ -178,7 +177,6 @@ struct pl08x_txd {
 	struct dma_async_tx_descriptor tx;
 	struct list_head node;
 	struct list_head dsg_list;
-	enum dma_transfer_direction direction;
 	dma_addr_t llis_bus;
 	struct pl08x_lli *llis_va;
 	/* Default cctl value for LLIs */
@@ -997,6 +995,7 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
 	if (!list_empty(&plchan->pend_list)) {
 		list_for_each_entry_safe(txdi,
 				next, &plchan->pend_list, node) {
+			pl08x_release_mux(plchan);
 			list_del(&txdi->node);
 			pl08x_free_txd(pl08x, txdi);
 		}
@@ -1018,12 +1017,10 @@ static void pl08x_free_chan_resources(struct dma_chan *chan)
 /*
  * This should be called with the channel plchan->lock held
  */
-static int prep_phy_channel(struct pl08x_dma_chan *plchan,
-			    struct pl08x_txd *txd)
+static int prep_phy_channel(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_phy_chan *ch;
-	int ret;
 
 	/* Check if we already have a channel */
 	if (plchan->phychan) {
@@ -1038,36 +1035,11 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 		return -EBUSY;
 	}
 
-	/*
-	 * OK we have a physical channel: for memcpy() this is all we
-	 * need, but for slaves the physical signals may be muxed!
-	 * Can the platform allow us to use this channel?
-	 */
-	if (plchan->slave) {
-		ret = pl08x_request_mux(plchan);
-		if (ret < 0) {
-			dev_dbg(&pl08x->adev->dev,
-				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
-				ch->id, plchan->name);
-			/* Release physical channel & return */
-			pl08x_put_phy_channel(pl08x, ch);
-			return -EBUSY;
-		}
-	}
-
 	plchan->phychan = ch;
-	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
-		 ch->id,
-		 plchan->signal,
-		 plchan->name);
+	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
+		 ch->id, plchan->name);
 
 got_channel:
-	/* Assign the flow control signal to this channel */
-	if (txd->direction == DMA_MEM_TO_DEV)
-		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
-	else if (txd->direction == DMA_DEV_TO_MEM)
-		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
-
 	plchan->phychan_hold++;
 
 	return 0;
@@ -1077,7 +1049,6 @@ static void release_phy_channel(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_driver_data *pl08x = plchan->host;
 
-	pl08x_release_mux(plchan);
 	pl08x_put_phy_channel(pl08x, plchan->phychan);
 	plchan->phychan = NULL;
 }
@@ -1340,19 +1311,12 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 	 * See if we already have a physical channel allocated,
 	 * else this is the time to try to get one.
 	 */
-	ret = prep_phy_channel(plchan, txd);
+	ret = prep_phy_channel(plchan);
 	if (ret) {
 		/*
 		 * No physical channel was available.
 		 *
 		 * memcpy transfers can be sorted out at submission time.
-		 *
-		 * Slave transfers may have been denied due to platform
-		 * channel muxing restrictions. Since there is no guarantee
-		 * that this will ever be resolved, and the signal must be
-		 * acquired AFTER acquiring the physical channel, we will let
-		 * them be NACK:ed with -EBUSY here. The drivers can retry
-		 * the prep() call if they are eager on doing this using DMA.
 		 */
 		if (plchan->slave) {
 			pl08x_free_txd_list(pl08x, plchan);
@@ -1423,7 +1387,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	}
 	list_add_tail(&dsg->node, &txd->dsg_list);
 
-	txd->direction = DMA_MEM_TO_MEM;
 	dsg->src_addr = src;
 	dsg->dst_addr = dest;
 	dsg->len = len;
@@ -1477,8 +1440,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	 * will take precedence since this may configure the
 	 * channel target address dynamically at runtime.
 	 */
-	txd->direction = direction;
-
 	if (direction == DMA_MEM_TO_DEV) {
 		cctl = PL080_CONTROL_SRC_INCR;
 		slave_addr = plchan->cfg.dst_addr;
@@ -1519,9 +1480,28 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 
 	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 
+	ret = pl08x_request_mux(plchan);
+	if (ret < 0) {
+		pl08x_free_txd(pl08x, txd);
+		dev_dbg(&pl08x->adev->dev,
+			"unable to mux for transfer on %s due to platform restrictions\n",
+			plchan->name);
+		return NULL;
+	}
+
+	dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
+		plchan->signal, plchan->name);
+
+	/* Assign the flow control signal to this channel */
+	if (direction == DMA_MEM_TO_DEV)
+		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
+	else
+		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+
 	for_each_sg(sgl, sg, sg_len, tmp) {
 		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
 		if (!dsg) {
+			pl08x_release_mux(plchan);
 			pl08x_free_txd(pl08x, txd);
 			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
 					__func__);
@@ -1586,6 +1566,8 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		}
 		/* Dequeue jobs and free LLIs */
 		if (plchan->at) {
+			/* Killing this one off, release its mux */
+			pl08x_release_mux(plchan);
 			pl08x_free_txd(pl08x, plchan->at);
 			plchan->at = NULL;
 		}
@@ -1702,7 +1684,6 @@ static void pl08x_tasklet(unsigned long data)
 
 		/*
 		 * No more jobs, so free up the physical channel
-		 * Free any allocated signal on slave transfers too
 		 */
 		release_phy_channel(plchan);
 		plchan->state = PL08X_CHAN_IDLE;
@@ -1720,8 +1701,7 @@ static void pl08x_tasklet(unsigned long data)
 			int ret;
 
 			/* This should REALLY not fail now */
-			ret = prep_phy_channel(waiting,
-					       waiting->waiting);
+			ret = prep_phy_channel(waiting);
 			BUG_ON(ret);
 			waiting->phychan_hold--;
 			waiting->state = PL08X_CHAN_RUNNING;
@@ -1794,6 +1774,11 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 			tx = plchan->at;
 			if (tx) {
 				plchan->at = NULL;
+				/*
+				 * This descriptor is done, release its mux
+				 * reservation.
+				 */
+				pl08x_release_mux(plchan);
 				dma_cookie_complete(&tx->tx);
 				list_add_tail(&tx->node, &plchan->done_list);
 			}
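
For reference, a minimal sketch of the signal/mux lifecycle this patch introduces, condensed from the hunks above. It reuses the driver's own identifiers (pl08x_request_mux(), pl08x_release_mux(), the PL080_CONFIG_*_SEL_SHIFT fields); the two wrapper functions are hypothetical names used only to group the steps and the snippet is not buildable on its own. The point of the change: the DMA request signal is now reserved per descriptor in pl08x_prep_slave_sg() and released whenever that descriptor is retired (IRQ completion, DMA_TERMINATE_ALL, or pending-list teardown), instead of being tied to the physical channel in prep_phy_channel()/release_phy_channel().

/* Hypothetical condensation of the new flow -- not part of the patch. */

/* prep time: reserve the request signal and route it into the txd's ccfg */
static int example_reserve_signal(struct pl08x_dma_chan *plchan,
				  struct pl08x_txd *txd,
				  enum dma_transfer_direction direction)
{
	if (pl08x_request_mux(plchan) < 0)
		return -EBUSY;	/* prep_slave_sg() returns NULL; client may retry */

	if (direction == DMA_MEM_TO_DEV)
		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
	else
		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
	return 0;
}

/* retire time: every path that frees an in-flight txd drops the mux first */
static void example_retire_txd(struct pl08x_driver_data *pl08x,
			       struct pl08x_dma_chan *plchan,
			       struct pl08x_txd *txd)
{
	pl08x_release_mux(plchan);	/* mirrors the IRQ, terminate_all and
					 * pl08x_free_txd_list() hunks above */
	pl08x_free_txd(pl08x, txd);
}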