author    Russell King <rmk+kernel@arm.linux.org.uk>    2012-05-26 08:54:15 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>    2012-07-01 09:15:57 -0400
commit    a5a488db427ef1ba637163d1a699b170c20d9789 (patch)
tree      c78ff3401fdb4c09609d4c528767043da77a1532
parent    c33b644cb31899265ec5102a4ed45c44269dde95 (diff)
dmaengine: PL08x: rejig physical channel allocation
Rework the physical channel allocation mechanism to only allocate
physical channels to virtual channels when they're about to be used.
This eliminates all the complexity of holding channels while
descriptors are being prepared, which is completely unnecessary.

This also brings the driver to a state where the generic virtual DMA
code can be used with it, and opens up the possibility of properly
scheduling and prioritising physical DMA channels between virtual DMA
channels.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--    drivers/dma/amba-pl08x.c    268
1 file changed, 112 insertions(+), 156 deletions(-)
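The gist of the rework, before diving into the hunks: a virtual channel no
longer grabs (and holds) a physical channel while descriptors are being
prepared; instead, issue_pending() tries to claim one at the moment a
transfer is ready, and simply parks the channel in the WAITING state when
none is free. Below is a minimal user-space sketch of that idea; the types
and helpers here (phy_get, phy_alloc_and_start, NUM_PHY) are hypothetical
stand-ins for illustration, not the driver's API.

#include <stdio.h>
#include <stddef.h>

/* Channel states, mirroring the driver's PL08X_CHAN_* values. */
enum chan_state { CHAN_IDLE, CHAN_WAITING, CHAN_RUNNING };

struct virt_chan;

struct phy_chan {
	int id;
	struct virt_chan *serving;	/* NULL when the channel is free */
};

struct virt_chan {
	const char *name;
	enum chan_state state;
	struct phy_chan *phychan;	/* NULL until a transfer starts */
	int pending;			/* queued descriptors */
};

#define NUM_PHY 2			/* hypothetical: two physical channels */
static struct phy_chan phy[NUM_PHY] = { { 0, NULL }, { 1, NULL } };

/* Find a free physical channel and claim it for @vc; NULL if none. */
static struct phy_chan *phy_get(struct virt_chan *vc)
{
	for (int i = 0; i < NUM_PHY; i++)
		if (!phy[i].serving) {
			phy[i].serving = vc;
			return &phy[i];
		}
	return NULL;
}

/*
 * Allocate-on-use: called when a transfer is issued, not while
 * descriptors are being prepared.  If no physical channel is free,
 * the virtual channel waits rather than holding on to anything.
 */
static void phy_alloc_and_start(struct virt_chan *vc)
{
	struct phy_chan *ch = phy_get(vc);

	if (!ch) {
		vc->state = CHAN_WAITING;
		printf("%s: no physical channel, waiting\n", vc->name);
		return;
	}
	vc->phychan = ch;
	vc->state = CHAN_RUNNING;
	vc->pending--;			/* "start" the next descriptor */
	printf("%s: running on physical channel %d\n", vc->name, ch->id);
}

int main(void)
{
	struct virt_chan a = { "vc-a", CHAN_IDLE, NULL, 1 };
	struct virt_chan b = { "vc-b", CHAN_IDLE, NULL, 1 };
	struct virt_chan c = { "vc-c", CHAN_IDLE, NULL, 1 };

	/* Three issued transfers compete for two physical channels. */
	phy_alloc_and_start(&a);
	phy_alloc_and_start(&b);
	phy_alloc_and_start(&c);	/* ends up WAITING */
	return 0;
}

The real driver does the same dance under plchan->lock, and a waiting
channel is picked up again when another transfer completes (see
pl08x_phy_free() in the third hunk).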
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 30b6921f094..bbae30ceb8f 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -210,8 +210,6 @@ enum pl08x_dma_chan_state {
  * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
  * @chan: wrappped abstract channel
  * @phychan: the physical channel utilized by this channel, if there is one
- * @phychan_hold: if non-zero, hold on to the physical channel even if we
- *	have no pending entries
  * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
  * @name: name of channel
  * @cd: channel platform data
@@ -230,7 +228,6 @@ enum pl08x_dma_chan_state {
 struct pl08x_dma_chan {
 	struct dma_chan chan;
 	struct pl08x_phy_chan *phychan;
-	int phychan_hold;
 	struct tasklet_struct tasklet;
 	const char *name;
 	const struct pl08x_channel_data *cd;
@@ -587,19 +584,111 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
 	return ch;
 }
 
+/* Mark the physical channel as free.  Note, this write is atomic. */
 static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
 					 struct pl08x_phy_chan *ch)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ch->lock, flags);
-
-	/* Stop the channel and clear its interrupts */
-	pl08x_terminate_phy_chan(pl08x, ch);
-
-	/* Mark it as free */
-	ch->serving = NULL;
-	spin_unlock_irqrestore(&ch->lock, flags);
+	ch->serving = NULL;
+}
+
+/*
+ * Try to allocate a physical channel.  When successful, assign it to
+ * this virtual channel, and initiate the next descriptor.  The
+ * virtual channel lock must be held at this point.
+ */
+static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
+{
+	struct pl08x_driver_data *pl08x = plchan->host;
+	struct pl08x_phy_chan *ch;
+
+	ch = pl08x_get_phy_channel(pl08x, plchan);
+	if (!ch) {
+		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
+		plchan->state = PL08X_CHAN_WAITING;
+		return;
+	}
+
+	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
+		ch->id, plchan->name);
+
+	plchan->phychan = ch;
+	plchan->state = PL08X_CHAN_RUNNING;
+	pl08x_start_next_txd(plchan);
+}
+
+static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
+	struct pl08x_dma_chan *plchan)
+{
+	struct pl08x_driver_data *pl08x = plchan->host;
+
+	dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
+		ch->id, plchan->name);
+
+	/*
+	 * We do this without taking the lock; we're really only concerned
+	 * about whether this pointer is NULL or not, and we're guaranteed
+	 * that this will only be called when it _already_ is non-NULL.
+	 */
+	ch->serving = plchan;
+	plchan->phychan = ch;
+	plchan->state = PL08X_CHAN_RUNNING;
+	pl08x_start_next_txd(plchan);
+}
+
+/*
+ * Free a physical DMA channel, potentially reallocating it to another
+ * virtual channel if we have any pending.
+ */
+static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
+{
+	struct pl08x_driver_data *pl08x = plchan->host;
+	struct pl08x_dma_chan *p, *next;
+
+ retry:
+	next = NULL;
+
+	/* Find a waiting virtual channel for the next transfer. */
+	list_for_each_entry(p, &pl08x->memcpy.channels, chan.device_node)
+		if (p->state == PL08X_CHAN_WAITING) {
+			next = p;
+			break;
+		}
+
+	if (!next) {
+		list_for_each_entry(p, &pl08x->slave.channels, chan.device_node)
+			if (p->state == PL08X_CHAN_WAITING) {
+				next = p;
+				break;
+			}
+	}
+
+	/* Ensure that the physical channel is stopped */
+	pl08x_terminate_phy_chan(pl08x, plchan->phychan);
+
+	if (next) {
+		bool success;
+
+		/*
+		 * Eww.  We know this isn't going to deadlock
+		 * but lockdep probably doesn't.
+		 */
+		spin_lock(&next->lock);
+		/* Re-check the state now that we have the lock */
+		success = next->state == PL08X_CHAN_WAITING;
+		if (success)
+			pl08x_phy_reassign_start(plchan->phychan, next);
+		spin_unlock(&next->lock);
+
+		/* If the state changed, try to find another channel */
+		if (!success)
+			goto retry;
+	} else {
+		/* No more jobs, so free up the physical channel */
+		pl08x_put_phy_channel(pl08x, plchan->phychan);
+	}
+
+	plchan->phychan = NULL;
+	plchan->state = PL08X_CHAN_IDLE;
 }
 
 /*
@@ -1028,45 +1117,6 @@ static void pl08x_free_chan_resources(struct dma_chan *chan)
 {
 }
 
-/*
- * This should be called with the channel plchan->lock held
- */
-static int prep_phy_channel(struct pl08x_dma_chan *plchan)
-{
-	struct pl08x_driver_data *pl08x = plchan->host;
-	struct pl08x_phy_chan *ch;
-
-	/* Check if we already have a channel */
-	if (plchan->phychan) {
-		ch = plchan->phychan;
-		goto got_channel;
-	}
-
-	ch = pl08x_get_phy_channel(pl08x, plchan);
-	if (!ch) {
-		/* No physical channel available, cope with it */
-		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
-		return -EBUSY;
-	}
-
-	plchan->phychan = ch;
-	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
-		ch->id, plchan->name);
-
-got_channel:
-	plchan->phychan_hold++;
-
-	return 0;
-}
-
-static void release_phy_channel(struct pl08x_dma_chan *plchan)
-{
-	struct pl08x_driver_data *pl08x = plchan->host;
-
-	pl08x_put_phy_channel(pl08x, plchan->phychan);
-	plchan->phychan = NULL;
-}
-
 static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
@@ -1079,19 +1129,6 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 
 	/* Put this onto the pending list */
 	list_add_tail(&txd->node, &plchan->pend_list);
-
-	/*
-	 * If there was no physical channel available for this memcpy,
-	 * stack the request up and indicate that the channel is waiting
-	 * for a free physical channel.
-	 */
-	if (!plchan->slave && !plchan->phychan) {
-		/* Do this memcpy whenever there is a channel ready */
-		plchan->state = PL08X_CHAN_WAITING;
-	} else {
-		plchan->phychan_hold--;
-	}
-
 	spin_unlock_irqrestore(&plchan->lock, flags);
 
 	return cookie;
@@ -1282,19 +1319,10 @@ static void pl08x_issue_pending(struct dma_chan *chan)
 
 	spin_lock_irqsave(&plchan->lock, flags);
 	list_splice_tail_init(&plchan->pend_list, &plchan->issued_list);
-
-	/* Something is already active, or we're waiting for a channel... */
-	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
-		spin_unlock_irqrestore(&plchan->lock, flags);
-		return;
-	}
-
-	/* Take the first element in the queue and execute it */
 	if (!list_empty(&plchan->issued_list)) {
-		plchan->state = PL08X_CHAN_RUNNING;
-		pl08x_start_next_txd(plchan);
+		if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
+			pl08x_phy_alloc_and_start(plchan);
 	}
-
 	spin_unlock_irqrestore(&plchan->lock, flags);
 }
 
@@ -1302,48 +1330,18 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 		struct pl08x_txd *txd)
 {
 	struct pl08x_driver_data *pl08x = plchan->host;
-	unsigned long flags;
-	int num_llis, ret;
+	int num_llis;
 
 	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
 	if (!num_llis) {
+		unsigned long flags;
+
 		spin_lock_irqsave(&plchan->lock, flags);
 		pl08x_free_txd(pl08x, txd);
 		spin_unlock_irqrestore(&plchan->lock, flags);
+
 		return -EINVAL;
 	}
-
-	spin_lock_irqsave(&plchan->lock, flags);
-
-	/*
-	 * See if we already have a physical channel allocated,
-	 * else this is the time to try to get one.
-	 */
-	ret = prep_phy_channel(plchan);
-	if (ret) {
-		/*
-		 * No physical channel was available.
-		 *
-		 * memcpy transfers can be sorted out at submission time.
-		 */
-		if (plchan->slave) {
-			pl08x_free_txd_list(pl08x, plchan);
-			pl08x_free_txd(pl08x, txd);
-			spin_unlock_irqrestore(&plchan->lock, flags);
-			return -EBUSY;
-		}
-	} else
-		/*
-		 * Else we're all set, paused and ready to roll, status
-		 * will switch to PL08X_CHAN_RUNNING when we call
-		 * issue_pending(). If there is something running on the
-		 * channel already we don't change its state.
-		 */
-		if (plchan->state == PL08X_CHAN_IDLE)
-			plchan->state = PL08X_CHAN_PAUSED;
-
-	spin_unlock_irqrestore(&plchan->lock, flags);
-
 	return 0;
 }
 
@@ -1563,14 +1561,11 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		plchan->state = PL08X_CHAN_IDLE;
 
 		if (plchan->phychan) {
-			pl08x_terminate_phy_chan(pl08x, plchan->phychan);
-
 			/*
 			 * Mark physical channel as free and free any slave
 			 * signal
 			 */
-			release_phy_channel(plchan);
-			plchan->phychan_hold = 0;
+			pl08x_phy_free(plchan);
 		}
 		/* Dequeue jobs and free LLIs */
 		if (plchan->at) {
@@ -1670,50 +1665,6 @@ static void pl08x_tasklet(unsigned long data)
 
 	spin_lock_irqsave(&plchan->lock, flags);
 	list_splice_tail_init(&plchan->done_list, &head);
-
-	if (plchan->at || !list_empty(&plchan->pend_list) || plchan->phychan_hold) {
-		/*
-		 * This channel is still in use - we have a new txd being
-		 * prepared and will soon be queued.  Don't give up the
-		 * physical channel.
-		 */
-	} else {
-		struct pl08x_dma_chan *waiting = NULL;
-
-		/*
-		 * No more jobs, so free up the physical channel
-		 */
-		release_phy_channel(plchan);
-		plchan->state = PL08X_CHAN_IDLE;
-
-		/*
-		 * And NOW before anyone else can grab that free:d up
-		 * physical channel, see if there is some memcpy pending
-		 * that seriously needs to start because of being stacked
-		 * up while we were choking the physical channels with data.
-		 */
-		list_for_each_entry(waiting, &pl08x->memcpy.channels,
-				    chan.device_node) {
-			if (waiting->state == PL08X_CHAN_WAITING) {
-				int ret;
-
-				/* This should REALLY not fail now */
-				ret = prep_phy_channel(waiting);
-				BUG_ON(ret);
-				waiting->phychan_hold--;
-				waiting->state = PL08X_CHAN_RUNNING;
-				/*
-				 * Eww. We know this isn't going to deadlock
-				 * but lockdep probably doens't.
-				 */
-				spin_lock(&waiting->lock);
-				pl08x_start_next_txd(waiting);
-				spin_unlock(&waiting->lock);
-				break;
-			}
-		}
-	}
-
 	spin_unlock_irqrestore(&plchan->lock, flags);
 
 	while (!list_empty(&head)) {
@@ -1784,9 +1735,14 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 			dma_cookie_complete(&tx->tx);
 			list_add_tail(&tx->node, &plchan->done_list);
 
-			/* And start the next descriptor */
+			/*
+			 * And start the next descriptor (if any),
+			 * otherwise free this channel.
+			 */
 			if (!list_empty(&plchan->issued_list))
 				pl08x_start_next_txd(plchan);
+			else
+				pl08x_phy_free(plchan);
 		}
 		spin_unlock(&plchan->lock);
 
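To round off: the IRQ-path change above works because pl08x_phy_free()
hands a finished physical channel directly to the first waiting virtual
channel instead of releasing it and re-arbitrating. Here is a rough model
of that handoff, with the same caveats as the earlier sketch (hypothetical
types, no locking, and none of the re-check-under-lock/goto-retry logic or
the memcpy-before-slave scan order the real function has):

#include <stdio.h>

enum chan_state { CHAN_IDLE, CHAN_WAITING, CHAN_RUNNING };

struct virt_chan {
	const char *name;
	enum chan_state state;
	int phychan_id;			/* -1 when no physical channel */
};

/*
 * Hand the finished physical channel straight to the first WAITING
 * virtual channel; only mark it free when nobody is waiting.
 */
static void phy_free(struct virt_chan *owner, struct virt_chan **all, int n)
{
	struct virt_chan *next = NULL;

	for (int i = 0; i < n; i++)
		if (all[i]->state == CHAN_WAITING) {
			next = all[i];
			break;
		}

	if (next) {
		next->phychan_id = owner->phychan_id;
		next->state = CHAN_RUNNING;
		printf("reassigned channel %d to %s\n",
		       next->phychan_id, next->name);
	} else {
		printf("channel %d is now free\n", owner->phychan_id);
	}

	owner->phychan_id = -1;
	owner->state = CHAN_IDLE;
}

int main(void)
{
	struct virt_chan a = { "vc-a", CHAN_RUNNING, 0 };
	struct virt_chan b = { "vc-b", CHAN_WAITING, -1 };
	struct virt_chan *all[] = { &a, &b };

	phy_free(&a, all, 2);	/* vc-a finishes; vc-b takes channel 0 */
	return 0;
}

In the driver itself the WAITING state is re-checked under the next
channel's lock and the scan retried if it changed, since another CPU may
have started that channel in the meantime.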