author		Russell King <rmk+kernel@arm.linux.org.uk>	2012-05-26 09:04:29 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2012-07-01 09:15:59 -0400
commit		01d8dc64e92a0abace41028db5b9ca298458543f (patch)
tree		3a3af4e7d0dc3a069ad70036f82fbdd010cabfa6 /drivers
parent		a5a488db427ef1ba637163d1a699b170c20d9789 (diff)
dmaengine: PL08x: convert to use virt-dma structs
Convert PL08x to use the virt-dma structures.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
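For orientation, the conversion amounts to embedding the virt-dma core objects inside the driver's own descriptor and channel structures and reaching the generic dmaengine members through them. The sketch below is illustrative only, not code from this commit: the pl08x wrappers and the container_of() helpers mirror the hunks further down, while the virt_dma_desc/virt_dma_chan layouts are assumed (simplified) from drivers/dma/virt-dma.h.

/*
 * Illustrative sketch only -- not part of this commit.
 * Shows the embedding pattern the conversion relies on.
 */
#include <linux/dmaengine.h>
#include <linux/kernel.h>

struct virt_dma_desc {
	struct dma_async_tx_descriptor tx;	/* generic descriptor lives here */
	/* ... list management owned by the virt-dma core (assumed) ... */
};

struct virt_dma_chan {
	struct dma_chan chan;			/* generic channel lives here */
	/* ... descriptor lists, lock owned by the virt-dma core (assumed) ... */
};

/* Driver-private wrappers, as after this patch: */
struct pl08x_txd {
	struct virt_dma_desc vd;	/* was: struct dma_async_tx_descriptor tx */
	/* driver-specific fields (LLIs, sg list, ...) */
};

struct pl08x_dma_chan {
	struct virt_dma_chan vc;	/* was: struct dma_chan chan */
	/* driver-specific fields (physical channel, state, ...) */
};

/* The cast helpers gain one level of indirection: */
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, vc.chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, vd.tx);
}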
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/dma/amba-pl08x.c	57
1 file changed, 29 insertions(+), 28 deletions(-)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index bbae30ceb8f..9a0642805b8 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -86,6 +86,7 @@
 #include <asm/hardware/pl080.h>
 
 #include "dmaengine.h"
+#include "virt-dma.h"
 
 #define DRIVER_NAME "pl08xdmac"
 
@@ -165,7 +166,7 @@ struct pl08x_sg {
 
 /**
  * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
- * @tx: async tx descriptor
+ * @vd: virtual DMA descriptor
  * @node: node for txd list for channels
  * @dsg_list: list of children sg's
  * @llis_bus: DMA memory address (physical) start for the LLIs
@@ -174,7 +175,7 @@ struct pl08x_sg {
  * @ccfg: config reg values for current txd
  */
 struct pl08x_txd {
-	struct dma_async_tx_descriptor tx;
+	struct virt_dma_desc vd;
 	struct list_head node;
 	struct list_head dsg_list;
 	dma_addr_t llis_bus;
@@ -208,7 +209,7 @@ enum pl08x_dma_chan_state {
 
 /**
  * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
- * @chan: wrappped abstract channel
+ * @vc: wrappped virtual channel
  * @phychan: the physical channel utilized by this channel, if there is one
  * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
  * @name: name of channel
@@ -226,7 +227,7 @@ enum pl08x_dma_chan_state {
  * @mux_use: count of descriptors using this DMA request signal setting
  */
 struct pl08x_dma_chan {
-	struct dma_chan chan;
+	struct virt_dma_chan vc;
 	struct pl08x_phy_chan *phychan;
 	struct tasklet_struct tasklet;
 	const char *name;
@@ -287,12 +288,12 @@ struct pl08x_driver_data {
 
 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
 {
-	return container_of(chan, struct pl08x_dma_chan, chan);
+	return container_of(chan, struct pl08x_dma_chan, vc.chan);
 }
 
 static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
 {
-	return container_of(tx, struct pl08x_txd, tx);
+	return container_of(tx, struct pl08x_txd, vd.tx);
 }
 
 /*
@@ -648,14 +649,14 @@ static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
 	next = NULL;
 
 	/* Find a waiting virtual channel for the next transfer. */
-	list_for_each_entry(p, &pl08x->memcpy.channels, chan.device_node)
+	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
 		if (p->state == PL08X_CHAN_WAITING) {
 			next = p;
 			break;
 		}
 
 	if (!next) {
-		list_for_each_entry(p, &pl08x->slave.channels, chan.device_node)
+		list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
 			if (p->state == PL08X_CHAN_WAITING) {
 				next = p;
 				break;
@@ -1351,9 +1352,9 @@ static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
 	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
 
 	if (txd) {
-		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
-		txd->tx.flags = flags;
-		txd->tx.tx_submit = pl08x_tx_submit;
+		dma_async_tx_descriptor_init(&txd->vd.tx, &plchan->vc.chan);
+		txd->vd.tx.flags = flags;
+		txd->vd.tx.tx_submit = pl08x_tx_submit;
 		INIT_LIST_HEAD(&txd->node);
 		INIT_LIST_HEAD(&txd->dsg_list);
 
@@ -1413,7 +1414,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	if (ret)
 		return NULL;
 
-	return &txd->tx;
+	return &txd->vd.tx;
 }
 
 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
@@ -1529,7 +1530,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	if (ret)
 		return NULL;
 
-	return &txd->tx;
+	return &txd->vd.tx;
 }
 
 static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -1630,11 +1631,11 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
 
 static void pl08x_unmap_buffers(struct pl08x_txd *txd)
 {
-	struct device *dev = txd->tx.chan->device->dev;
+	struct device *dev = txd->vd.tx.chan->device->dev;
 	struct pl08x_sg *dsg;
 
-	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
 			list_for_each_entry(dsg, &txd->dsg_list, node)
 				dma_unmap_single(dev, dsg->src_addr, dsg->len,
 						DMA_TO_DEVICE);
@@ -1644,8 +1645,8 @@ static void pl08x_unmap_buffers(struct pl08x_txd *txd)
 						DMA_TO_DEVICE);
 		}
 	}
-	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
 			list_for_each_entry(dsg, &txd->dsg_list, node)
 				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
 						DMA_FROM_DEVICE);
@@ -1670,8 +1671,8 @@ static void pl08x_tasklet(unsigned long data)
 	while (!list_empty(&head)) {
 		struct pl08x_txd *txd = list_first_entry(&head,
 			struct pl08x_txd, node);
-		dma_async_tx_callback callback = txd->tx.callback;
-		void *callback_param = txd->tx.callback_param;
+		dma_async_tx_callback callback = txd->vd.tx.callback;
+		void *callback_param = txd->vd.tx.callback_param;
 
 		list_del(&txd->node);
 
@@ -1732,7 +1733,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 			 * reservation.
 			 */
 			pl08x_release_mux(plchan);
-			dma_cookie_complete(&tx->tx);
+			dma_cookie_complete(&tx->vd.tx);
 			list_add_tail(&tx->node, &plchan->done_list);
 
 			/*
@@ -1807,8 +1808,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 			 "initialize virtual channel \"%s\"\n",
 			 chan->name);
 
-		chan->chan.device = dmadev;
-		dma_cookie_init(&chan->chan);
+		chan->vc.chan.device = dmadev;
+		dma_cookie_init(&chan->vc.chan);
 
 		spin_lock_init(&chan->lock);
 		INIT_LIST_HEAD(&chan->pend_list);
@@ -1817,7 +1818,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 		tasklet_init(&chan->tasklet, pl08x_tasklet,
 			     (unsigned long) chan);
 
-		list_add_tail(&chan->chan.device_node, &dmadev->channels);
+		list_add_tail(&chan->vc.chan.device_node, &dmadev->channels);
 	}
 	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
 		 i, slave ? "slave" : "memcpy");
@@ -1830,8 +1831,8 @@ static void pl08x_free_virtual_channels(struct dma_device *dmadev)
 	struct pl08x_dma_chan *next;
 
 	list_for_each_entry_safe(chan,
-			next, &dmadev->channels, chan.device_node) {
-		list_del(&chan->chan.device_node);
+			next, &dmadev->channels, vc.chan.device_node) {
+		list_del(&chan->vc.chan.device_node);
 		kfree(chan);
 	}
 }
@@ -1884,7 +1885,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
 	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
 	seq_printf(s, "CHANNEL:\tSTATE:\n");
 	seq_printf(s, "--------\t------\n");
-	list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
+	list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
 		seq_printf(s, "%s\t\t%s\n", chan->name,
 			   pl08x_state_str(chan->state));
 	}
@@ -1892,7 +1893,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
 	seq_printf(s, "\nPL08x virtual slave channels:\n");
 	seq_printf(s, "CHANNEL:\tSTATE:\n");
 	seq_printf(s, "--------\t------\n");
-	list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
+	list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
 		seq_printf(s, "%s\t\t%s\n", chan->name,
 			   pl08x_state_str(chan->state));
 	}