author		Vinod Koul <vinod.koul@intel.com>	2015-02-02 19:55:35 -0500
committer	Vinod Koul <vinod.koul@intel.com>	2015-02-02 19:55:35 -0500
commit		2cd6f7928ca42c7744f493e0ae2a4acaf02331a0 (patch)
tree		2051ca01dbfeaa3185df454d874b6c450f540a92
parent		c914570f28552eb4ed6f016ec7b1db292a7c924b (diff)
parent		5cf5aec5b38a5143883fc5b689bf5c1c8ee48aa3 (diff)
Merge branch 'topic/slave_caps_device_control_fix_rebased' into for-linus
50 files changed, 1647 insertions, 1829 deletions
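The changes below retire the multiplexed device_control() callback, which took an enum dma_ctrl_cmd plus an opaque unsigned long, in favor of dedicated device_config, device_pause, device_resume and device_terminate_all callbacks in struct dma_device, and move the slave capability description (bus widths, directions, residue granularity) into struct dma_device fields. Client code is unaffected as long as it goes through the dmaengine_* wrappers. A minimal client-side sketch, assuming chan was obtained elsewhere (e.g. via dma_request_slave_channel()) and fifo_phys is a made-up device FIFO address:

#include <linux/dmaengine.h>

static int example_setup_tx(struct dma_chan *chan, dma_addr_t fifo_phys)
{
	struct dma_slave_config cfg = {
		.dst_addr	= fifo_phys,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};

	/* was dmaengine_device_control(chan, DMA_SLAVE_CONFIG, ...);
	 * now routed to the driver's .device_config callback */
	return dmaengine_slave_config(chan, &cfg);
}

Likewise, dmaengine_terminate_all(), dmaengine_pause() and dmaengine_resume() now land in the corresponding dedicated callbacks rather than in a command switch.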
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
index 766658ccf235..05d2280190f1 100644
--- a/Documentation/dmaengine/provider.txt
+++ b/Documentation/dmaengine/provider.txt
@@ -113,6 +113,31 @@ need to initialize a few fields in there:
   * channels:	should be initialized as a list using the
 		INIT_LIST_HEAD macro for example
 
+  * src_addr_widths:
+    - should contain a bitmask of the supported source transfer width
+
+  * dst_addr_widths:
+    - should contain a bitmask of the supported destination transfer
+      width
+
+  * directions:
+    - should contain a bitmask of the supported slave directions
+      (i.e. excluding mem2mem transfers)
+
+  * residue_granularity:
+    - Granularity of the transfer residue reported to dma_set_residue.
+    - This can be either:
+      + Descriptor
+        -> Your device doesn't support any kind of residue
+           reporting. The framework will only know that a particular
+           transaction descriptor is done.
+      + Segment
+        -> Your device is able to report which chunks have been
+           transferred
+      + Burst
+        -> Your device is able to report which bursts have been
+           transferred
+
   * dev:	should hold the pointer to the struct device associated
 		to your current driver instance.
 
@@ -274,48 +299,36 @@ supported.
       account the current period.
     - This function can be called in an interrupt context.
 
-  * device_control
-    - Used by client drivers to control and configure the channel it
-      has a handle on.
-    - Called with a command and an argument
-      + The command is one of the values listed by the enum
-        dma_ctrl_cmd. The valid commands are:
-        + DMA_PAUSE
-          + Pauses a transfer on the channel
-          + This command should operate synchronously on the channel,
-            pausing right away the work of the given channel
-        + DMA_RESUME
-          + Restarts a transfer on the channel
-          + This command should operate synchronously on the channel,
-            resuming right away the work of the given channel
-        + DMA_TERMINATE_ALL
-          + Aborts all the pending and ongoing transfers on the
-            channel
-          + This command should operate synchronously on the channel,
-            terminating right away all the channels
-        + DMA_SLAVE_CONFIG
-          + Reconfigures the channel with passed configuration
-          + This command should NOT perform synchronously, or on any
-            currently queued transfers, but only on subsequent ones
-          + In this case, the function will receive a
-            dma_slave_config structure pointer as an argument, that
-            will detail which configuration to use.
-          + Even though that structure contains a direction field,
-            this field is deprecated in favor of the direction
-            argument given to the prep_* functions
-        + FSLDMA_EXTERNAL_START
-          + TODO: Why does that even exist?
-      + The argument is an opaque unsigned long. This actually is a
-        pointer to a struct dma_slave_config that should be used only
-        in the DMA_SLAVE_CONFIG.
-
-  * device_slave_caps
-    - Called through the framework by client drivers in order to have
-      an idea of what are the properties of the channel allocated to
-      them.
-    - Such properties are the buswidth, available directions, etc.
-    - Required for every generic layer doing DMA transfers, such as
-      ASoC.
+  * device_config
+    - Reconfigures the channel with the configuration given as
+      argument
+    - This command should NOT perform synchronously, or on any
+      currently queued transfers, but only on subsequent ones
+    - In this case, the function will receive a dma_slave_config
+      structure pointer as an argument, that will detail which
+      configuration to use.
+    - Even though that structure contains a direction field, this
+      field is deprecated in favor of the direction argument given to
+      the prep_* functions
+    - This call is mandatory for slave operations only. This should NOT be
+      set or expected to be set for memcpy operations.
+      If a driver supports both, it should use this call for slave
+      operations only and not for memcpy ones.
+
+  * device_pause
+    - Pauses a transfer on the channel
+    - This command should operate synchronously on the channel,
+      pausing right away the work of the given channel
+
+  * device_resume
+    - Resumes a transfer on the channel
+    - This command should operate synchronously on the channel,
+      resuming right away the work of the given channel
+
+  * device_terminate_all
+    - Aborts all the pending and ongoing transfers on the channel
+    - This command should operate synchronously on the channel,
+      terminating right away all the channels
 
 Misc notes (stuff that should be documented, but don't really know
 where to put them)
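To make the provider-side picture above concrete, here is a condensed, hypothetical sketch of a driver filling in the new fields; every foo_* symbol is assumed to be implemented elsewhere in the driver, and the capability values are arbitrary examples:

#include <linux/dmaengine.h>

/* assumed driver callbacks, shown here only for their signatures */
static int foo_config(struct dma_chan *chan, struct dma_slave_config *cfg);
static int foo_pause(struct dma_chan *chan);
static int foo_resume(struct dma_chan *chan);
static int foo_terminate_all(struct dma_chan *chan);

static void foo_fill_dma_device(struct dma_device *dd)
{
	dd->device_config	 = foo_config;
	dd->device_pause	 = foo_pause;
	dd->device_resume	 = foo_resume;
	dd->device_terminate_all = foo_terminate_all;
	dd->src_addr_widths	 = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths	 = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions		 = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity	 = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
}

The conversions of the in-tree drivers that follow all reduce to exactly this pattern.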
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index f831bb952b2f..458d92158b12 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -606,12 +606,12 @@ static void cryp_dma_done(struct cryp_ctx *ctx)
 	dev_dbg(ctx->device->dev, "[%s]: ", __func__);
 
 	chan = ctx->device->dma.chan_mem2cryp;
-	dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+	dmaengine_terminate_all(chan);
 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
 		     ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
 
 	chan = ctx->device->dma.chan_cryp2mem;
-	dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+	dmaengine_terminate_all(chan);
 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
 		     ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
 }
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 70a20871e998..187a8fd7eee7 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -202,7 +202,7 @@ static void hash_dma_done(struct hash_ctx *ctx)
 	struct dma_chan *chan;
 
 	chan = ctx->device->dma.chan_mem2hash;
-	dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+	dmaengine_terminate_all(chan);
 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
 		     ctx->device->dma.sg_len, DMA_TO_DEVICE);
 }
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 1364d00881dd..4a5fd245014e 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1386,32 +1386,6 @@ static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
 	return pl08x_cctl(cctl);
 }
 
-static int dma_set_runtime_config(struct dma_chan *chan,
-				  struct dma_slave_config *config)
-{
-	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
-	struct pl08x_driver_data *pl08x = plchan->host;
-
-	if (!plchan->slave)
-		return -EINVAL;
-
-	/* Reject definitely invalid configurations */
-	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
-	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
-		return -EINVAL;
-
-	if (config->device_fc && pl08x->vd->pl080s) {
-		dev_err(&pl08x->adev->dev,
-			"%s: PL080S does not support peripheral flow control\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	plchan->cfg = *config;
-
-	return 0;
-}
-
 /*
  * Slave transactions callback to the slave device to allow
  * synchronization of slave DMA signals with the DMAC enable
@@ -1693,20 +1667,71 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
 	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
 }
 
-static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			 unsigned long arg)
+static int pl08x_config(struct dma_chan *chan,
+			struct dma_slave_config *config)
+{
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	struct pl08x_driver_data *pl08x = plchan->host;
+
+	if (!plchan->slave)
+		return -EINVAL;
+
+	/* Reject definitely invalid configurations */
+	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+		return -EINVAL;
+
+	if (config->device_fc && pl08x->vd->pl080s) {
+		dev_err(&pl08x->adev->dev,
+			"%s: PL080S does not support peripheral flow control\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	plchan->cfg = *config;
+
+	return 0;
+}
+
+static int pl08x_terminate_all(struct dma_chan *chan)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	unsigned long flags;
-	int ret = 0;
 
-	/* Controls applicable to inactive channels */
-	if (cmd == DMA_SLAVE_CONFIG) {
-		return dma_set_runtime_config(chan,
-					      (struct dma_slave_config *)arg);
+	spin_lock_irqsave(&plchan->vc.lock, flags);
+	if (!plchan->phychan && !plchan->at) {
+		spin_unlock_irqrestore(&plchan->vc.lock, flags);
+		return 0;
 	}
 
+	plchan->state = PL08X_CHAN_IDLE;
+
+	if (plchan->phychan) {
+		/*
+		 * Mark physical channel as free and free any slave
+		 * signal
+		 */
+		pl08x_phy_free(plchan);
+	}
+	/* Dequeue jobs and free LLIs */
+	if (plchan->at) {
+		pl08x_desc_free(&plchan->at->vd);
+		plchan->at = NULL;
+	}
+	/* Dequeue jobs not yet fired as well */
+	pl08x_free_txd_list(pl08x, plchan);
+
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
+
+	return 0;
+}
+
+static int pl08x_pause(struct dma_chan *chan)
+{
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	unsigned long flags;
+
 	/*
 	 * Anything succeeds on channels with no physical allocation and
 	 * no queued transfers.
@@ -1717,42 +1742,35 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		return 0;
 	}
 
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		plchan->state = PL08X_CHAN_IDLE;
-
-		if (plchan->phychan) {
-			/*
-			 * Mark physical channel as free and free any slave
-			 * signal
-			 */
-			pl08x_phy_free(plchan);
-		}
-		/* Dequeue jobs and free LLIs */
-		if (plchan->at) {
-			pl08x_desc_free(&plchan->at->vd);
-			plchan->at = NULL;
-		}
-		/* Dequeue jobs not yet fired as well */
-		pl08x_free_txd_list(pl08x, plchan);
-		break;
-	case DMA_PAUSE:
-		pl08x_pause_phy_chan(plchan->phychan);
-		plchan->state = PL08X_CHAN_PAUSED;
-		break;
-	case DMA_RESUME:
-		pl08x_resume_phy_chan(plchan->phychan);
-		plchan->state = PL08X_CHAN_RUNNING;
-		break;
-	default:
-		/* Unknown command */
-		ret = -ENXIO;
-		break;
-	}
+	pl08x_pause_phy_chan(plchan->phychan);
+	plchan->state = PL08X_CHAN_PAUSED;
+
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
+
+	return 0;
+}
+
+static int pl08x_resume(struct dma_chan *chan)
+{
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	unsigned long flags;
+
+	/*
+	 * Anything succeeds on channels with no physical allocation and
+	 * no queued transfers.
+	 */
+	spin_lock_irqsave(&plchan->vc.lock, flags);
+	if (!plchan->phychan && !plchan->at) {
+		spin_unlock_irqrestore(&plchan->vc.lock, flags);
+		return 0;
 	}
 
+	pl08x_resume_phy_chan(plchan->phychan);
+	plchan->state = PL08X_CHAN_RUNNING;
+
 	spin_unlock_irqrestore(&plchan->vc.lock, flags);
 
-	return ret;
+	return 0;
 }
 
 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
@@ -2048,7 +2066,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
 	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
 	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
-	pl08x->memcpy.device_control = pl08x_control;
+	pl08x->memcpy.device_config = pl08x_config;
+	pl08x->memcpy.device_pause = pl08x_pause;
+	pl08x->memcpy.device_resume = pl08x_resume;
+	pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
 
 	/* Initialize slave engine */
 	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
@@ -2061,7 +2082,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->slave.device_issue_pending = pl08x_issue_pending;
 	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
 	pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
-	pl08x->slave.device_control = pl08x_control;
+	pl08x->slave.device_config = pl08x_config;
+	pl08x->slave.device_pause = pl08x_pause;
+	pl08x->slave.device_resume = pl08x_resume;
+	pl08x->slave.device_terminate_all = pl08x_terminate_all;
 
 	/* Get the platform data */
 	pl08x->pd = dev_get_platdata(&adev->dev);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index ca9dd2613283..1e1a4c567542 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -42,6 +42,11 @@
 #define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
 #define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
 				|ATC_DIF(AT_DMA_MEM_IF))
+#define ATC_DMA_BUSWIDTHS\
+	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 
 /*
  * Initial number of descriptors to allocate for each channel. This could
@@ -972,11 +977,13 @@ err_out:
 	return NULL;
 }
 
-static int set_runtime_config(struct dma_chan *chan,
-			      struct dma_slave_config *sconfig)
+static int atc_config(struct dma_chan *chan,
+		      struct dma_slave_config *sconfig)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 
+	dev_vdbg(chan2dev(chan), "%s\n", __func__);
+
 	/* Check if it is chan is configured for slave transfers */
 	if (!chan->private)
 		return -EINVAL;
@@ -989,9 +996,28 @@ static int set_runtime_config(struct dma_chan *chan,
 	return 0;
 }
 
+static int atc_pause(struct dma_chan *chan)
+{
+	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	struct at_dma		*atdma = to_at_dma(chan->device);
+	int			chan_id = atchan->chan_common.chan_id;
+	unsigned long		flags;
 
-static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		       unsigned long arg)
+	LIST_HEAD(list);
+
+	dev_vdbg(chan2dev(chan), "%s\n", __func__);
+
+	spin_lock_irqsave(&atchan->lock, flags);
+
+	dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
+	set_bit(ATC_IS_PAUSED, &atchan->status);
+
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
+	return 0;
+}
+
+static int atc_resume(struct dma_chan *chan)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	struct at_dma		*atdma = to_at_dma(chan->device);
@@ -1000,60 +1026,61 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 	LIST_HEAD(list);
 
-	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
+	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
-	if (cmd == DMA_PAUSE) {
-		spin_lock_irqsave(&atchan->lock, flags);
-
-		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
-		set_bit(ATC_IS_PAUSED, &atchan->status);
-
-		spin_unlock_irqrestore(&atchan->lock, flags);
-	} else if (cmd == DMA_RESUME) {
-		if (!atc_chan_is_paused(atchan))
-			return 0;
-
-		spin_lock_irqsave(&atchan->lock, flags);
-
-		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
-		clear_bit(ATC_IS_PAUSED, &atchan->status);
-
-		spin_unlock_irqrestore(&atchan->lock, flags);
-	} else if (cmd == DMA_TERMINATE_ALL) {
-		struct at_desc	*desc, *_desc;
-		/*
-		 * This is only called when something went wrong elsewhere, so
-		 * we don't really care about the data. Just disable the
-		 * channel. We still have to poll the channel enable bit due
-		 * to AHB/HSB limitations.
-		 */
-		spin_lock_irqsave(&atchan->lock, flags);
-
-		/* disabling channel: must also remove suspend state */
-		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
-
-		/* confirm that this channel is disabled */
-		while (dma_readl(atdma, CHSR) & atchan->mask)
-			cpu_relax();
-
-		/* active_list entries will end up before queued entries */
-		list_splice_init(&atchan->queue, &list);
-		list_splice_init(&atchan->active_list, &list);
-
-		/* Flush all pending and queued descriptors */
-		list_for_each_entry_safe(desc, _desc, &list, desc_node)
-			atc_chain_complete(atchan, desc);
-
-		clear_bit(ATC_IS_PAUSED, &atchan->status);
-		/* if channel dedicated to cyclic operations, free it */
-		clear_bit(ATC_IS_CYCLIC, &atchan->status);
-
-		spin_unlock_irqrestore(&atchan->lock, flags);
-	} else if (cmd == DMA_SLAVE_CONFIG) {
-		return set_runtime_config(chan, (struct dma_slave_config *)arg);
-	} else {
-		return -ENXIO;
-	}
+	if (!atc_chan_is_paused(atchan))
+		return 0;
+
+	spin_lock_irqsave(&atchan->lock, flags);
+
+	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
+	clear_bit(ATC_IS_PAUSED, &atchan->status);
+
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
+	return 0;
+}
+
+static int atc_terminate_all(struct dma_chan *chan)
+{
+	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	struct at_dma		*atdma = to_at_dma(chan->device);
+	int			chan_id = atchan->chan_common.chan_id;
+	struct at_desc		*desc, *_desc;
+	unsigned long		flags;
+
+	LIST_HEAD(list);
+
+	dev_vdbg(chan2dev(chan), "%s\n", __func__);
+
+	/*
+	 * This is only called when something went wrong elsewhere, so
+	 * we don't really care about the data. Just disable the
+	 * channel. We still have to poll the channel enable bit due
+	 * to AHB/HSB limitations.
+	 */
+	spin_lock_irqsave(&atchan->lock, flags);
+
+	/* disabling channel: must also remove suspend state */
+	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
+
+	/* confirm that this channel is disabled */
+	while (dma_readl(atdma, CHSR) & atchan->mask)
+		cpu_relax();
+
+	/* active_list entries will end up before queued entries */
+	list_splice_init(&atchan->queue, &list);
+	list_splice_init(&atchan->active_list, &list);
+
+	/* Flush all pending and queued descriptors */
+	list_for_each_entry_safe(desc, _desc, &list, desc_node)
+		atc_chain_complete(atchan, desc);
+
+	clear_bit(ATC_IS_PAUSED, &atchan->status);
+	/* if channel dedicated to cyclic operations, free it */
+	clear_bit(ATC_IS_CYCLIC, &atchan->status);
+
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
@@ -1505,7 +1532,14 @@ static int __init at_dma_probe(struct platform_device *pdev)
 		/* controller can do slave DMA: can trigger cyclic transfers */
 		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
 		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
-		atdma->dma_common.device_control = atc_control;
+		atdma->dma_common.device_config = atc_config;
+		atdma->dma_common.device_pause = atc_pause;
+		atdma->dma_common.device_resume = atc_resume;
+		atdma->dma_common.device_terminate_all = atc_terminate_all;
+		atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
+		atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
+		atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	}
 
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
@@ -1622,7 +1656,7 @@ static void atc_suspend_cyclic(struct at_dma_chan *atchan)
 	if (!atc_chan_is_paused(atchan)) {
 		dev_warn(chan2dev(chan),
 		"cyclic channel not paused, should be done by channel user\n");
-		atc_control(chan, DMA_PAUSE, 0);
+		atc_pause(chan);
 	}
 
 	/* now preserve additional data for cyclic operations */
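The ATC_DMA_BUSWIDTHS value added above is a plain bitmask over enum dma_slave_buswidth, so checking whether a given width is supported is a single bit test. A trivial sketch (the helper name is made up):

#include <linux/dmaengine.h>

static bool example_width_supported(u32 buswidths, enum dma_slave_buswidth w)
{
	/* e.g. buswidths = ATC_DMA_BUSWIDTHS, w = DMA_SLAVE_BUSWIDTH_4_BYTES */
	return buswidths & BIT(w);
}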
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 2787aba60c6b..d6bba6c636c2 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -232,7 +232,8 @@ enum atc_status {
  * @save_dscr: for cyclic operations, preserve next descriptor address in
  *             the cyclic list on suspend/resume cycle
  * @remain_desc: to save remain desc length
- * @dma_sconfig: configuration for slave transfers, passed via DMA_SLAVE_CONFIG
+ * @dma_sconfig: configuration for slave transfers, passed via
+ * .device_config
  * @lock: serializes enqueue/dequeue operations to descriptors lists
  * @active_list: list of descriptors dmaengine is being running on
  * @queue: list of descriptors ready to be submitted to engine
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 1c4c96b7e2bf..c39000b9980a 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -174,6 +174,13 @@
 
 #define AT_XDMAC_MAX_CHAN	0x20
 
+#define AT_XDMAC_DMA_BUSWIDTHS\
+	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
+	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
 enum atc_status {
 	AT_XDMAC_CHAN_IS_CYCLIC = 0,
 	AT_XDMAC_CHAN_IS_PAUSED,
@@ -1107,58 +1114,75 @@ static void at_xdmac_issue_pending(struct dma_chan *chan)
 	return;
 }
 
-static int at_xdmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			    unsigned long arg)
+static int at_xdmac_device_config(struct dma_chan *chan,
+				  struct dma_slave_config *config)
+{
+	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	int ret;
+
+	dev_dbg(chan2dev(chan), "%s\n", __func__);
+
+	spin_lock_bh(&atchan->lock);
+	ret = at_xdmac_set_slave_config(chan, config);
+	spin_unlock_bh(&atchan->lock);
+
+	return ret;
+}
+
+static int at_xdmac_device_pause(struct dma_chan *chan)
 {
-	struct at_xdmac_desc	*desc, *_desc;
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
-	int			ret = 0;
 
-	dev_dbg(chan2dev(chan), "%s: cmd=%d\n", __func__, cmd);
+	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
 	spin_lock_bh(&atchan->lock);
+	at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
+	set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+	spin_unlock_bh(&atchan->lock);
+
+	return 0;
+}
 
-	switch (cmd) {
-	case DMA_PAUSE:
-		at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
-		set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-		break;
+static int at_xdmac_device_resume(struct dma_chan *chan)
+{
+	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
 
-	case DMA_RESUME:
-		if (!at_xdmac_chan_is_paused(atchan))
-			break;
+	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-		at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
-		clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-		break;
+	spin_lock_bh(&atchan->lock);
+	if (!at_xdmac_chan_is_paused(atchan))
+		return 0;
 
-	case DMA_TERMINATE_ALL:
-		at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
-		while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
-			cpu_relax();
+	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
+	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+	spin_unlock_bh(&atchan->lock);
+
+	return 0;
+}
 
-		/* Cancel all pending transfers. */
-		list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
-			at_xdmac_remove_xfer(atchan, desc);
+static int at_xdmac_device_terminate_all(struct dma_chan *chan)
+{
+	struct at_xdmac_desc	*desc, *_desc;
+	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
 
-		clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
-		break;
+	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	case DMA_SLAVE_CONFIG:
-		ret = at_xdmac_set_slave_config(chan,
-				(struct dma_slave_config *)arg);
-		break;
+	spin_lock_bh(&atchan->lock);
+	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
+		cpu_relax();
 
-	default:
-		dev_err(chan2dev(chan),
-			"unmanaged or unknown dma control cmd: %d\n", cmd);
-		ret = -ENXIO;
-	}
+	/* Cancel all pending transfers. */
+	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
+		at_xdmac_remove_xfer(atchan, desc);
 
+	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
 	spin_unlock_bh(&atchan->lock);
 
-	return ret;
+	return 0;
 }
 
 static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
@@ -1217,27 +1241,6 @@ static void at_xdmac_free_chan_resources(struct dma_chan *chan)
 	return;
 }
 
-#define AT_XDMAC_DMA_BUSWIDTHS\
-	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
-	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
-	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
-	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
-	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
-
-static int at_xdmac_device_slave_caps(struct dma_chan *dchan,
-				      struct dma_slave_caps *caps)
-{
-
-	caps->src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
-	caps->dstn_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
-	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	caps->cmd_pause = true;
-	caps->cmd_terminate = true;
-	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int atmel_xdmac_prepare(struct device *dev)
 {
@@ -1270,7 +1273,7 @@ static int atmel_xdmac_suspend(struct device *dev)
 
 		if (at_xdmac_chan_is_cyclic(atchan)) {
 			if (!at_xdmac_chan_is_paused(atchan))
-				at_xdmac_control(chan, DMA_PAUSE, 0);
+				at_xdmac_device_pause(chan);
 			atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
 			atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
 			atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
@@ -1407,8 +1410,14 @@ static int at_xdmac_probe(struct platform_device *pdev)
 	atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
 	atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
 	atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
-	atxdmac->dma.device_control = at_xdmac_control;
-	atxdmac->dma.device_slave_caps = at_xdmac_device_slave_caps;
+	atxdmac->dma.device_config = at_xdmac_device_config;
+	atxdmac->dma.device_pause = at_xdmac_device_pause;
+	atxdmac->dma.device_resume = at_xdmac_device_resume;
+	atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all;
+	atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
+	atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
+	atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 
 	/* Disable all chans and interrupts. */
 	at_xdmac_off(atxdmac);
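With at_xdmac's device_slave_caps() callback removed, the same information is now advertised declaratively through the struct dma_device fields set in probe above, and the framework answers dma_get_slave_caps() queries from them. A client-side sketch of such a query (the helper name is made up; the caps field names are those introduced by this series):

#include <linux/dmaengine.h>

static bool example_chan_fits(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;

	return (caps.directions & BIT(DMA_MEM_TO_DEV)) &&
	       (caps.dst_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES));
}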
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 918b7b3f766f..0723096fb50a 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -436,9 +436,11 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
 	return vchan_tx_prep(&c->vc, &d->vd, flags);
 }
 
-static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
+static int bcm2835_dma_slave_config(struct dma_chan *chan,
 		struct dma_slave_config *cfg)
 {
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
 	if ((cfg->direction == DMA_DEV_TO_MEM &&
 	     cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
 	    (cfg->direction == DMA_MEM_TO_DEV &&
@@ -452,8 +454,9 @@ static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
 	return 0;
 }
 
-static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
+static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 {
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
 	struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
 	unsigned long flags;
 	int timeout = 10000;
@@ -495,24 +498,6 @@ static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
 	return 0;
 }
 
-static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
-{
-	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
-
-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
-		return bcm2835_dma_slave_config(c,
-				(struct dma_slave_config *)arg);
-
-	case DMA_TERMINATE_ALL:
-		return bcm2835_dma_terminate_all(c);
-
-	default:
-		return -ENXIO;
-	}
-}
-
 static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
 {
 	struct bcm2835_chan *c;
@@ -565,18 +550,6 @@ static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
 	return chan;
 }
 
-static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan,
-	struct dma_slave_caps *caps)
-{
-	caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
-	caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
-	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	caps->cmd_pause = false;
-	caps->cmd_terminate = true;
-
-	return 0;
-}
-
 static int bcm2835_dma_probe(struct platform_device *pdev)
 {
 	struct bcm2835_dmadev *od;
@@ -615,9 +588,12 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
 	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
 	od->ddev.device_tx_status = bcm2835_dma_tx_status;
 	od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
-	od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps;
 	od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
-	od->ddev.device_control = bcm2835_dma_control;
+	od->ddev.device_config = bcm2835_dma_slave_config;
+	od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
+	od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 	od->ddev.dev = &pdev->dev;
 	INIT_LIST_HEAD(&od->ddev.channels);
 	spin_lock_init(&od->lock);
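Note the signature change in the bcm2835 callbacks: they now take the generic struct dma_chan and recover the driver-private channel themselves. This is the usual container_of() pattern; a minimal standalone sketch with a made-up foo_chan:

#include <linux/dmaengine.h>
#include <linux/kernel.h>

struct foo_chan {
	struct dma_chan chan;
	/* driver-private state would live here */
};

static inline struct foo_chan *to_foo_chan(struct dma_chan *c)
{
	return container_of(c, struct foo_chan, chan);
}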
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index e88588d8ecd3..fd22dd36985f 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1690,7 +1690,7 @@ static u32 coh901318_get_bytes_left(struct dma_chan *chan)
  * Pauses a transfer without losing data. Enables power save.
  * Use this function in conjunction with coh901318_resume.
  */
-static void coh901318_pause(struct dma_chan *chan)
+static int coh901318_pause(struct dma_chan *chan)
 {
 	u32 val;
 	unsigned long flags;
@@ -1730,12 +1730,13 @@ static void coh901318_pause(struct dma_chan *chan)
 	enable_powersave(cohc);
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
+	return 0;
 }
 
 /* Resumes a transfer that has been stopped via 300_dma_stop(..).
    Power save is handled.
 */
-static void coh901318_resume(struct dma_chan *chan)
+static int coh901318_resume(struct dma_chan *chan)
 {
 	u32 val;
 	unsigned long flags;
@@ -1760,6 +1761,7 @@ static void coh901318_resume(struct dma_chan *chan)
 	}
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
+	return 0;
 }
 
 bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
@@ -2114,6 +2116,57 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static int coh901318_terminate_all(struct dma_chan *chan)
+{
+	unsigned long flags;
+	struct coh901318_chan *cohc = to_coh901318_chan(chan);
+	struct coh901318_desc *cohd;
+	void __iomem *virtbase = cohc->base->virtbase;
+
+	/* The remainder of this function terminates the transfer */
+	coh901318_pause(chan);
+	spin_lock_irqsave(&cohc->lock, flags);
+
+	/* Clear any pending BE or TC interrupt */
+	if (cohc->id < 32) {
+		writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
+		writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
+	} else {
+		writel(1 << (cohc->id - 32), virtbase +
+		       COH901318_BE_INT_CLEAR2);
+		writel(1 << (cohc->id - 32), virtbase +
+		       COH901318_TC_INT_CLEAR2);
+	}
+
+	enable_powersave(cohc);
+
+	while ((cohd = coh901318_first_active_get(cohc))) {
+		/* release the lli allocation*/
+		coh901318_lli_free(&cohc->base->pool, &cohd->lli);
+
+		/* return desc to free-list */
+		coh901318_desc_remove(cohd);
+		coh901318_desc_free(cohc, cohd);
+	}
+
+	while ((cohd = coh901318_first_queued(cohc))) {
+		/* release the lli allocation*/
+		coh901318_lli_free(&cohc->base->pool, &cohd->lli);
+
+		/* return desc to free-list */
+		coh901318_desc_remove(cohd);
+		coh901318_desc_free(cohc, cohd);
+	}
+
+
+	cohc->nbr_active_done = 0;
+	cohc->busy = 0;
+
+	spin_unlock_irqrestore(&cohc->lock, flags);
+
+	return 0;
+}
+
 static int coh901318_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct coh901318_chan	*cohc = to_coh901318_chan(chan);
@@ -2156,7 +2209,7 @@ coh901318_free_chan_resources(struct dma_chan *chan)
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
 
-	dmaengine_terminate_all(chan);
+	coh901318_terminate_all(chan);
 }
 
 
@@ -2461,8 +2514,8 @@ static const struct burst_table burst_sizes[] = {
 	},
 };
 
-static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
-					    struct dma_slave_config *config)
+static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
+					   struct dma_slave_config *config)
 {
 	struct coh901318_chan *cohc = to_coh901318_chan(chan);
 	dma_addr_t addr;
@@ -2482,7 +2535,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
 		maxburst = config->dst_maxburst;
 	} else {
 		dev_err(COHC_2_DEV(cohc), "illegal channel mode\n");
-		return;
+		return -EINVAL;
 	}
 
 	dev_dbg(COHC_2_DEV(cohc), "configure channel for %d byte transfers\n",
@@ -2528,7 +2581,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
 	default:
 		dev_err(COHC_2_DEV(cohc),
 			"bad runtimeconfig: alien address width\n");
-		return;
+		return -EINVAL;
 	}
 
 	ctrl |= burst_sizes[i].reg;
@@ -2538,84 +2591,12 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
 
 	cohc->addr = addr;
 	cohc->ctrl = ctrl;
-}
-
-static int
-coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		  unsigned long arg)
-{
-	unsigned long flags;
-	struct coh901318_chan *cohc = to_coh901318_chan(chan);
-	struct coh901318_desc *cohd;
-	void __iomem *virtbase = cohc->base->virtbase;
-
-	if (cmd == DMA_SLAVE_CONFIG) {
-		struct dma_slave_config *config =
-			(struct dma_slave_config *) arg;
-
-		coh901318_dma_set_runtimeconfig(chan, config);
-		return 0;
-	}
-
-	if (cmd == DMA_PAUSE) {
-		coh901318_pause(chan);
-		return 0;
-	}
-
-	if (cmd == DMA_RESUME) {
-		coh901318_resume(chan);
-		return 0;
-	}
-
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
-
-	/* The remainder of this function terminates the transfer */
-	coh901318_pause(chan);
-	spin_lock_irqsave(&cohc->lock, flags);
-
-	/* Clear any pending BE or TC interrupt */
-	if (cohc->id < 32) {
-		writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
-		writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
-	} else {
-		writel(1 << (cohc->id - 32), virtbase +
-		       COH901318_BE_INT_CLEAR2);
-		writel(1 << (cohc->id - 32), virtbase +
-		       COH901318_TC_INT_CLEAR2);
-	}
-
-	enable_powersave(cohc);
-
-	while ((cohd = coh901318_first_active_get(cohc))) {
-		/* release the lli allocation*/
-		coh901318_lli_free(&cohc->base->pool, &cohd->lli);
-
-		/* return desc to free-list */
-		coh901318_desc_remove(cohd);
-		coh901318_desc_free(cohc, cohd);
-	}
-
-	while ((cohd = coh901318_first_queued(cohc))) {
-		/* release the lli allocation*/
-		coh901318_lli_free(&cohc->base->pool, &cohd->lli);
-
-		/* return desc to free-list */
-		coh901318_desc_remove(cohd);
-		coh901318_desc_free(cohc, cohd);
-	}
-
-
-	cohc->nbr_active_done = 0;
-	cohc->busy = 0;
-
-	spin_unlock_irqrestore(&cohc->lock, flags);
 
 	return 0;
 }
 
-void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
-			 struct coh901318_base *base)
+static void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
+				struct coh901318_base *base)
 {
 	int chans_i;
 	int i = 0;
@@ -2717,7 +2698,10 @@ static int __init coh901318_probe(struct platform_device *pdev)
 	base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
 	base->dma_slave.device_tx_status = coh901318_tx_status;
 	base->dma_slave.device_issue_pending = coh901318_issue_pending;
-	base->dma_slave.device_control = coh901318_control;
+	base->dma_slave.device_config = coh901318_dma_set_runtimeconfig;
+	base->dma_slave.device_pause = coh901318_pause;
+	base->dma_slave.device_resume = coh901318_resume;
+	base->dma_slave.device_terminate_all = coh901318_terminate_all;
 	base->dma_slave.dev = &pdev->dev;
 
 	err = dma_async_device_register(&base->dma_slave);
@@ -2737,7 +2721,10 @@ static int __init coh901318_probe(struct platform_device *pdev)
 	base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
 	base->dma_memcpy.device_tx_status = coh901318_tx_status;
 	base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
-	base->dma_memcpy.device_control = coh901318_control;
+	base->dma_memcpy.device_config = coh901318_dma_set_runtimeconfig;
+	base->dma_memcpy.device_pause = coh901318_pause;
+	base->dma_memcpy.device_resume = coh901318_resume;
+	base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
 	base->dma_memcpy.dev = &pdev->dev;
 	/*
 	 * This controller can only access address at even 32bit boundaries,
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index b743adf56465..512cb8e2805e 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -525,12 +525,6 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
 	return &c->txd;
 }
 
-static int cpp41_cfg_chan(struct cppi41_channel *c,
-		struct dma_slave_config *cfg)
-{
-	return 0;
-}
-
 static void cppi41_compute_td_desc(struct cppi41_desc *d)
 {
 	d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
@@ -647,28 +641,6 @@ static int cppi41_stop_chan(struct dma_chan *chan)
 	return 0;
 }
 
-static int cppi41_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
-{
-	struct cppi41_channel *c = to_cpp41_chan(chan);
-	int ret;
-
-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
-		ret = cpp41_cfg_chan(c, (struct dma_slave_config *) arg);
-		break;
-
-	case DMA_TERMINATE_ALL:
-		ret = cppi41_stop_chan(chan);
-		break;
-
-	default:
-		ret = -ENXIO;
-		break;
-	}
-	return ret;
-}
-
 static void cleanup_chans(struct cppi41_dd *cdd)
 {
 	while (!list_empty(&cdd->ddev.channels)) {
@@ -953,7 +925,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
 	cdd->ddev.device_tx_status = cppi41_dma_tx_status;
 	cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
 	cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
-	cdd->ddev.device_control = cppi41_dma_control;
+	cdd->ddev.device_terminate_all = cppi41_stop_chan;
 	cdd->ddev.dev = dev;
 	INIT_LIST_HEAD(&cdd->ddev.channels);
 	cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index bdeafeefa5f6..4527a3ebeac4 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c | |||
@@ -210,7 +210,7 @@ static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst) | |||
210 | } | 210 | } |
211 | 211 | ||
212 | static int jz4740_dma_slave_config(struct dma_chan *c, | 212 | static int jz4740_dma_slave_config(struct dma_chan *c, |
213 | const struct dma_slave_config *config) | 213 | struct dma_slave_config *config) |
214 | { | 214 | { |
215 | struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); | 215 | struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); |
216 | struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); | 216 | struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); |
@@ -290,21 +290,6 @@ static int jz4740_dma_terminate_all(struct dma_chan *c) | |||
290 | return 0; | 290 | return 0; |
291 | } | 291 | } |
292 | 292 | ||
293 | static int jz4740_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
294 | unsigned long arg) | ||
295 | { | ||
296 | struct dma_slave_config *config = (struct dma_slave_config *)arg; | ||
297 | |||
298 | switch (cmd) { | ||
299 | case DMA_SLAVE_CONFIG: | ||
300 | return jz4740_dma_slave_config(chan, config); | ||
301 | case DMA_TERMINATE_ALL: | ||
302 | return jz4740_dma_terminate_all(chan); | ||
303 | default: | ||
304 | return -ENOSYS; | ||
305 | } | ||
306 | } | ||
307 | |||
308 | static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan) | 293 | static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan) |
309 | { | 294 | { |
310 | struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); | 295 | struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); |
@@ -561,7 +546,8 @@ static int jz4740_dma_probe(struct platform_device *pdev) | |||
561 | dd->device_issue_pending = jz4740_dma_issue_pending; | 546 | dd->device_issue_pending = jz4740_dma_issue_pending; |
562 | dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg; | 547 | dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg; |
563 | dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic; | 548 | dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic; |
564 | dd->device_control = jz4740_dma_control; | 549 | dd->device_config = jz4740_dma_slave_config; |
550 | dd->device_terminate_all = jz4740_dma_terminate_all; | ||
565 | dd->dev = &pdev->dev; | 551 | dd->dev = &pdev->dev; |
566 | INIT_LIST_HEAD(&dd->channels); | 552 | INIT_LIST_HEAD(&dd->channels); |
567 | 553 | ||
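Note the subtle signature change in jz4740_dma_slave_config() above: the const qualifier is dropped from the dma_slave_config argument so the function can be assigned to the shared hook directly. Assuming the prototype this series introduces, the hook in struct dma_device reads:

/* device_config takes a non-const pointer, so driver-local
 * helpers previously marked const must drop the qualifier
 * to match the hook's type */
int (*device_config)(struct dma_chan *chan,
		     struct dma_slave_config *config);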
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index e057935e3023..f15712f2fec6 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -222,31 +222,35 @@ static void balance_ref_count(struct dma_chan *chan) | |||
222 | */ | 222 | */ |
223 | static int dma_chan_get(struct dma_chan *chan) | 223 | static int dma_chan_get(struct dma_chan *chan) |
224 | { | 224 | { |
225 | int err = -ENODEV; | ||
226 | struct module *owner = dma_chan_to_owner(chan); | 225 | struct module *owner = dma_chan_to_owner(chan); |
226 | int ret; | ||
227 | 227 | ||
228 | /* The channel is already in use, update client count */ | ||
228 | if (chan->client_count) { | 229 | if (chan->client_count) { |
229 | __module_get(owner); | 230 | __module_get(owner); |
230 | err = 0; | 231 | goto out; |
231 | } else if (try_module_get(owner)) | 232 | } |
232 | err = 0; | ||
233 | 233 | ||
234 | if (err == 0) | 234 | if (!try_module_get(owner)) |
235 | chan->client_count++; | 235 | return -ENODEV; |
236 | 236 | ||
237 | /* allocate upon first client reference */ | 237 | /* allocate upon first client reference */ |
238 | if (chan->client_count == 1 && err == 0) { | 238 | if (chan->device->device_alloc_chan_resources) { |
239 | int desc_cnt = chan->device->device_alloc_chan_resources(chan); | 239 | ret = chan->device->device_alloc_chan_resources(chan); |
240 | 240 | if (ret < 0) | |
241 | if (desc_cnt < 0) { | 241 | goto err_out; |
242 | err = desc_cnt; | ||
243 | chan->client_count = 0; | ||
244 | module_put(owner); | ||
245 | } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) | ||
246 | balance_ref_count(chan); | ||
247 | } | 242 | } |
248 | 243 | ||
249 | return err; | 244 | if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) |
245 | balance_ref_count(chan); | ||
246 | |||
247 | out: | ||
248 | chan->client_count++; | ||
249 | return 0; | ||
250 | |||
251 | err_out: | ||
252 | module_put(owner); | ||
253 | return ret; | ||
250 | } | 254 | } |
251 | 255 | ||
252 | /** | 256 | /** |
@@ -257,11 +261,15 @@ static int dma_chan_get(struct dma_chan *chan) | |||
257 | */ | 261 | */ |
258 | static void dma_chan_put(struct dma_chan *chan) | 262 | static void dma_chan_put(struct dma_chan *chan) |
259 | { | 263 | { |
264 | /* This channel is not in use, bail out */ | ||
260 | if (!chan->client_count) | 265 | if (!chan->client_count) |
261 | return; /* this channel failed alloc_chan_resources */ | 266 | return; |
267 | |||
262 | chan->client_count--; | 268 | chan->client_count--; |
263 | module_put(dma_chan_to_owner(chan)); | 269 | module_put(dma_chan_to_owner(chan)); |
264 | if (chan->client_count == 0) | 270 | |
271 | /* This channel is not in use anymore, free it */ | ||
272 | if (!chan->client_count && chan->device->device_free_chan_resources) | ||
265 | chan->device->device_free_chan_resources(chan); | 273 | chan->device->device_free_chan_resources(chan); |
266 | } | 274 | } |
267 | 275 | ||
@@ -471,6 +479,39 @@ static void dma_channel_rebalance(void) | |||
471 | } | 479 | } |
472 | } | 480 | } |
473 | 481 | ||
482 | int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) | ||
483 | { | ||
484 | struct dma_device *device; | ||
485 | |||
486 | if (!chan || !caps) | ||
487 | return -EINVAL; | ||
488 | |||
489 | device = chan->device; | ||
490 | |||
491 | /* check if the channel supports slave transactions */ | ||
492 | if (!test_bit(DMA_SLAVE, device->cap_mask.bits)) | ||
493 | return -ENXIO; | ||
494 | |||
495 | /* | ||
496 | * Check whether it reports it uses the generic slave | ||
497 | * capabilities, if not, that means it doesn't support any | ||
498 | * kind of slave capabilities reporting. | ||
499 | */ | ||
500 | if (!device->directions) | ||
501 | return -ENXIO; | ||
502 | |||
503 | caps->src_addr_widths = device->src_addr_widths; | ||
504 | caps->dst_addr_widths = device->dst_addr_widths; | ||
505 | caps->directions = device->directions; | ||
506 | caps->residue_granularity = device->residue_granularity; | ||
507 | |||
508 | caps->cmd_pause = !!device->device_pause; | ||
509 | caps->cmd_terminate = !!device->device_terminate_all; | ||
510 | |||
511 | return 0; | ||
512 | } | ||
513 | EXPORT_SYMBOL_GPL(dma_get_slave_caps); | ||
514 | |||
474 | static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, | 515 | static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, |
475 | struct dma_device *dev, | 516 | struct dma_device *dev, |
476 | dma_filter_fn fn, void *fn_param) | 517 | dma_filter_fn fn, void *fn_param) |
@@ -811,17 +852,16 @@ int dma_async_device_register(struct dma_device *device) | |||
811 | !device->device_prep_dma_sg); | 852 | !device->device_prep_dma_sg); |
812 | BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && | 853 | BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && |
813 | !device->device_prep_dma_cyclic); | 854 | !device->device_prep_dma_cyclic); |
814 | BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && | ||
815 | !device->device_control); | ||
816 | BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && | 855 | BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && |
817 | !device->device_prep_interleaved_dma); | 856 | !device->device_prep_interleaved_dma); |
818 | 857 | ||
819 | BUG_ON(!device->device_alloc_chan_resources); | ||
820 | BUG_ON(!device->device_free_chan_resources); | ||
821 | BUG_ON(!device->device_tx_status); | 858 | BUG_ON(!device->device_tx_status); |
822 | BUG_ON(!device->device_issue_pending); | 859 | BUG_ON(!device->device_issue_pending); |
823 | BUG_ON(!device->dev); | 860 | BUG_ON(!device->dev); |
824 | 861 | ||
862 | WARN(dma_has_cap(DMA_SLAVE, device->cap_mask) && !device->directions, | ||
863 | "this driver doesn't support generic slave capabilities reporting\n"); | ||
864 | |||
825 | /* note: this only matters in the | 865 | /* note: this only matters in the |
826 | * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case | 866 | * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case |
827 | */ | 867 | */ |
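Two core changes land in dmaengine.c here: dma_chan_get()/dma_chan_put() are restructured around goto-based error paths and now treat device_alloc_chan_resources/device_free_chan_resources as optional, and dma_get_slave_caps() moves into the core, deriving its answers from the new struct dma_device fields instead of a per-driver callback — pause and terminate support are inferred from the mere presence of device_pause and device_terminate_all. A client-side sketch of the resulting flow, using a hypothetical helper name:

#include <linux/dmaengine.h>

/* Probe a channel's generic capabilities before relying on them;
 * chan is assumed to come from dma_request_slave_channel(). */
static int foo_check_chan(struct dma_chan *chan)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		return ret;	/* no generic caps reporting */

	if (!(caps.directions & BIT(DMA_DEV_TO_MEM)))
		return -EINVAL;	/* dev-to-mem unsupported */

	if (caps.cmd_pause)
		return dmaengine_pause(chan);	/* device_pause exists */

	return 0;
}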
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 380478562b7d..fcb9a916e6f6 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c | |||
@@ -61,6 +61,13 @@ | |||
61 | */ | 61 | */ |
62 | #define NR_DESCS_PER_CHANNEL 64 | 62 | #define NR_DESCS_PER_CHANNEL 64 |
63 | 63 | ||
64 | /* The set of bus widths supported by the DMA controller */ | ||
65 | #define DW_DMA_BUSWIDTHS \ | ||
66 | BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ | ||
67 | BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | ||
68 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | ||
69 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
70 | |||
64 | /*----------------------------------------------------------------------*/ | 71 | /*----------------------------------------------------------------------*/ |
65 | 72 | ||
66 | static struct device *chan2dev(struct dma_chan *chan) | 73 | static struct device *chan2dev(struct dma_chan *chan) |
@@ -955,8 +962,7 @@ static inline void convert_burst(u32 *maxburst) | |||
955 | *maxburst = 0; | 962 | *maxburst = 0; |
956 | } | 963 | } |
957 | 964 | ||
958 | static int | 965 | static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) |
959 | set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) | ||
960 | { | 966 | { |
961 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 967 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
962 | 968 | ||
@@ -973,16 +979,25 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) | |||
973 | return 0; | 979 | return 0; |
974 | } | 980 | } |
975 | 981 | ||
976 | static inline void dwc_chan_pause(struct dw_dma_chan *dwc) | 982 | static int dwc_pause(struct dma_chan *chan) |
977 | { | 983 | { |
978 | u32 cfglo = channel_readl(dwc, CFG_LO); | 984 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
979 | unsigned int count = 20; /* timeout iterations */ | 985 | unsigned long flags; |
986 | unsigned int count = 20; /* timeout iterations */ | ||
987 | u32 cfglo; | ||
988 | |||
989 | spin_lock_irqsave(&dwc->lock, flags); | ||
980 | 990 | ||
991 | cfglo = channel_readl(dwc, CFG_LO); | ||
981 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); | 992 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); |
982 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) | 993 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) |
983 | udelay(2); | 994 | udelay(2); |
984 | 995 | ||
985 | dwc->paused = true; | 996 | dwc->paused = true; |
997 | |||
998 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
999 | |||
1000 | return 0; | ||
986 | } | 1001 | } |
987 | 1002 | ||
988 | static inline void dwc_chan_resume(struct dw_dma_chan *dwc) | 1003 | static inline void dwc_chan_resume(struct dw_dma_chan *dwc) |
@@ -994,53 +1009,48 @@ static inline void dwc_chan_resume(struct dw_dma_chan *dwc) | |||
994 | dwc->paused = false; | 1009 | dwc->paused = false; |
995 | } | 1010 | } |
996 | 1011 | ||
997 | static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 1012 | static int dwc_resume(struct dma_chan *chan) |
998 | unsigned long arg) | ||
999 | { | 1013 | { |
1000 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1014 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1001 | struct dw_dma *dw = to_dw_dma(chan->device); | ||
1002 | struct dw_desc *desc, *_desc; | ||
1003 | unsigned long flags; | 1015 | unsigned long flags; |
1004 | LIST_HEAD(list); | ||
1005 | 1016 | ||
1006 | if (cmd == DMA_PAUSE) { | 1017 | if (!dwc->paused) |
1007 | spin_lock_irqsave(&dwc->lock, flags); | 1018 | return 0; |
1008 | 1019 | ||
1009 | dwc_chan_pause(dwc); | 1020 | spin_lock_irqsave(&dwc->lock, flags); |
1010 | 1021 | ||
1011 | spin_unlock_irqrestore(&dwc->lock, flags); | 1022 | dwc_chan_resume(dwc); |
1012 | } else if (cmd == DMA_RESUME) { | ||
1013 | if (!dwc->paused) | ||
1014 | return 0; | ||
1015 | 1023 | ||
1016 | spin_lock_irqsave(&dwc->lock, flags); | 1024 | spin_unlock_irqrestore(&dwc->lock, flags); |
1017 | 1025 | ||
1018 | dwc_chan_resume(dwc); | 1026 | return 0; |
1027 | } | ||
1019 | 1028 | ||
1020 | spin_unlock_irqrestore(&dwc->lock, flags); | 1029 | static int dwc_terminate_all(struct dma_chan *chan) |
1021 | } else if (cmd == DMA_TERMINATE_ALL) { | 1030 | { |
1022 | spin_lock_irqsave(&dwc->lock, flags); | 1031 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1032 | struct dw_dma *dw = to_dw_dma(chan->device); | ||
1033 | struct dw_desc *desc, *_desc; | ||
1034 | unsigned long flags; | ||
1035 | LIST_HEAD(list); | ||
1023 | 1036 | ||
1024 | clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); | 1037 | spin_lock_irqsave(&dwc->lock, flags); |
1025 | 1038 | ||
1026 | dwc_chan_disable(dw, dwc); | 1039 | clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); |
1040 | |||
1041 | dwc_chan_disable(dw, dwc); | ||
1027 | 1042 | ||
1028 | dwc_chan_resume(dwc); | 1043 | dwc_chan_resume(dwc); |
1029 | 1044 | ||
1030 | /* active_list entries will end up before queued entries */ | 1045 | /* active_list entries will end up before queued entries */ |
1031 | list_splice_init(&dwc->queue, &list); | 1046 | list_splice_init(&dwc->queue, &list); |
1032 | list_splice_init(&dwc->active_list, &list); | 1047 | list_splice_init(&dwc->active_list, &list); |
1033 | 1048 | ||
1034 | spin_unlock_irqrestore(&dwc->lock, flags); | 1049 | spin_unlock_irqrestore(&dwc->lock, flags); |
1035 | 1050 | ||
1036 | /* Flush all pending and queued descriptors */ | 1051 | /* Flush all pending and queued descriptors */ |
1037 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | 1052 | list_for_each_entry_safe(desc, _desc, &list, desc_node) |
1038 | dwc_descriptor_complete(dwc, desc, false); | 1053 | dwc_descriptor_complete(dwc, desc, false); |
1039 | } else if (cmd == DMA_SLAVE_CONFIG) { | ||
1040 | return set_runtime_config(chan, (struct dma_slave_config *)arg); | ||
1041 | } else { | ||
1042 | return -ENXIO; | ||
1043 | } | ||
1044 | 1054 | ||
1045 | return 0; | 1055 | return 0; |
1046 | } | 1056 | } |
@@ -1657,13 +1667,23 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1657 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; | 1667 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; |
1658 | 1668 | ||
1659 | dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; | 1669 | dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; |
1660 | |||
1661 | dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; | 1670 | dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; |
1662 | dw->dma.device_control = dwc_control; | 1671 | |
1672 | dw->dma.device_config = dwc_config; | ||
1673 | dw->dma.device_pause = dwc_pause; | ||
1674 | dw->dma.device_resume = dwc_resume; | ||
1675 | dw->dma.device_terminate_all = dwc_terminate_all; | ||
1663 | 1676 | ||
1664 | dw->dma.device_tx_status = dwc_tx_status; | 1677 | dw->dma.device_tx_status = dwc_tx_status; |
1665 | dw->dma.device_issue_pending = dwc_issue_pending; | 1678 | dw->dma.device_issue_pending = dwc_issue_pending; |
1666 | 1679 | ||
1680 | /* DMA capabilities */ | ||
1681 | dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS; | ||
1682 | dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS; | ||
1683 | dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | | ||
1684 | BIT(DMA_MEM_TO_MEM); | ||
1685 | dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
1686 | |||
1667 | err = dma_async_device_register(&dw->dma); | 1687 | err = dma_async_device_register(&dw->dma); |
1668 | if (err) | 1688 | if (err) |
1669 | goto err_dma_register; | 1689 | goto err_dma_register; |
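Worth noting in the dw_dma conversion: dwc_chan_pause() used to run with the channel lock already held by dwc_control(), so the standalone dwc_pause() above must take dwc->lock itself. A self-contained sketch of that locking obligation, with the foo_chan structure standing in for driver specifics:

#include <linux/dmaengine.h>
#include <linux/spinlock.h>

struct foo_chan {
	struct dma_chan chan;
	spinlock_t lock;
	bool paused;
};

static inline struct foo_chan *to_foo_chan(struct dma_chan *c)
{
	return container_of(c, struct foo_chan, chan);
}

static int foo_pause(struct dma_chan *chan)
{
	struct foo_chan *fc = to_foo_chan(chan);
	unsigned long flags;

	/* the core calls this hook directly, so the callback
	 * owns its own locking now */
	spin_lock_irqsave(&fc->lock, flags);
	/* suspend the channel and wait for the FIFO to drain */
	fc->paused = true;
	spin_unlock_irqrestore(&fc->lock, flags);

	return 0;
}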
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h index 848e232f7cc7..254a1db03680 100644 --- a/drivers/dma/dw/regs.h +++ b/drivers/dma/dw/regs.h | |||
@@ -252,7 +252,7 @@ struct dw_dma_chan { | |||
252 | u8 src_master; | 252 | u8 src_master; |
253 | u8 dst_master; | 253 | u8 dst_master; |
254 | 254 | ||
255 | /* configuration passed via DMA_SLAVE_CONFIG */ | 255 | /* configuration passed via .device_config */ |
256 | struct dma_slave_config dma_sconfig; | 256 | struct dma_slave_config dma_sconfig; |
257 | }; | 257 | }; |
258 | 258 | ||
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index b969206439b7..e95fa7dabc0c 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -244,8 +244,9 @@ static void edma_execute(struct edma_chan *echan) | |||
244 | } | 244 | } |
245 | } | 245 | } |
246 | 246 | ||
247 | static int edma_terminate_all(struct edma_chan *echan) | 247 | static int edma_terminate_all(struct dma_chan *chan) |
248 | { | 248 | { |
249 | struct edma_chan *echan = to_edma_chan(chan); | ||
249 | unsigned long flags; | 250 | unsigned long flags; |
250 | LIST_HEAD(head); | 251 | LIST_HEAD(head); |
251 | 252 | ||
@@ -273,9 +274,11 @@ static int edma_terminate_all(struct edma_chan *echan) | |||
273 | return 0; | 274 | return 0; |
274 | } | 275 | } |
275 | 276 | ||
276 | static int edma_slave_config(struct edma_chan *echan, | 277 | static int edma_slave_config(struct dma_chan *chan, |
277 | struct dma_slave_config *cfg) | 278 | struct dma_slave_config *cfg) |
278 | { | 279 | { |
280 | struct edma_chan *echan = to_edma_chan(chan); | ||
281 | |||
279 | if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || | 282 | if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || |
280 | cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) | 283 | cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) |
281 | return -EINVAL; | 284 | return -EINVAL; |
@@ -285,8 +288,10 @@ static int edma_slave_config(struct edma_chan *echan, | |||
285 | return 0; | 288 | return 0; |
286 | } | 289 | } |
287 | 290 | ||
288 | static int edma_dma_pause(struct edma_chan *echan) | 291 | static int edma_dma_pause(struct dma_chan *chan) |
289 | { | 292 | { |
293 | struct edma_chan *echan = to_edma_chan(chan); | ||
294 | |||
290 | /* Pause/Resume only allowed with cyclic mode */ | 295 | /* Pause/Resume only allowed with cyclic mode */ |
291 | if (!echan->edesc || !echan->edesc->cyclic) | 296 | if (!echan->edesc || !echan->edesc->cyclic) |
292 | return -EINVAL; | 297 | return -EINVAL; |
@@ -295,8 +300,10 @@ static int edma_dma_pause(struct edma_chan *echan) | |||
295 | return 0; | 300 | return 0; |
296 | } | 301 | } |
297 | 302 | ||
298 | static int edma_dma_resume(struct edma_chan *echan) | 303 | static int edma_dma_resume(struct dma_chan *chan) |
299 | { | 304 | { |
305 | struct edma_chan *echan = to_edma_chan(chan); | ||
306 | |||
300 | /* Pause/Resume only allowed with cyclic mode */ | 307 | /* Pause/Resume only allowed with cyclic mode */ |
301 | if (!echan->edesc->cyclic) | 308 | if (!echan->edesc->cyclic) |
302 | return -EINVAL; | 309 | return -EINVAL; |
@@ -305,36 +312,6 @@ static int edma_dma_resume(struct edma_chan *echan) | |||
305 | return 0; | 312 | return 0; |
306 | } | 313 | } |
307 | 314 | ||
308 | static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
309 | unsigned long arg) | ||
310 | { | ||
311 | int ret = 0; | ||
312 | struct dma_slave_config *config; | ||
313 | struct edma_chan *echan = to_edma_chan(chan); | ||
314 | |||
315 | switch (cmd) { | ||
316 | case DMA_TERMINATE_ALL: | ||
317 | edma_terminate_all(echan); | ||
318 | break; | ||
319 | case DMA_SLAVE_CONFIG: | ||
320 | config = (struct dma_slave_config *)arg; | ||
321 | ret = edma_slave_config(echan, config); | ||
322 | break; | ||
323 | case DMA_PAUSE: | ||
324 | ret = edma_dma_pause(echan); | ||
325 | break; | ||
326 | |||
327 | case DMA_RESUME: | ||
328 | ret = edma_dma_resume(echan); | ||
329 | break; | ||
330 | |||
331 | default: | ||
332 | ret = -ENOSYS; | ||
333 | } | ||
334 | |||
335 | return ret; | ||
336 | } | ||
337 | |||
338 | /* | 315 | /* |
339 | * A PaRAM set configuration abstraction used by other modes | 316 | * A PaRAM set configuration abstraction used by other modes |
340 | * @chan: Channel who's PaRAM set we're configuring | 317 | * @chan: Channel who's PaRAM set we're configuring |
@@ -994,19 +971,6 @@ static void __init edma_chan_init(struct edma_cc *ecc, | |||
994 | BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ | 971 | BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ |
995 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) | 972 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) |
996 | 973 | ||
997 | static int edma_dma_device_slave_caps(struct dma_chan *dchan, | ||
998 | struct dma_slave_caps *caps) | ||
999 | { | ||
1000 | caps->src_addr_widths = EDMA_DMA_BUSWIDTHS; | ||
1001 | caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS; | ||
1002 | caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
1003 | caps->cmd_pause = true; | ||
1004 | caps->cmd_terminate = true; | ||
1005 | caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
1006 | |||
1007 | return 0; | ||
1008 | } | ||
1009 | |||
1010 | static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, | 974 | static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, |
1011 | struct device *dev) | 975 | struct device *dev) |
1012 | { | 976 | { |
@@ -1017,8 +981,16 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, | |||
1017 | dma->device_free_chan_resources = edma_free_chan_resources; | 981 | dma->device_free_chan_resources = edma_free_chan_resources; |
1018 | dma->device_issue_pending = edma_issue_pending; | 982 | dma->device_issue_pending = edma_issue_pending; |
1019 | dma->device_tx_status = edma_tx_status; | 983 | dma->device_tx_status = edma_tx_status; |
1020 | dma->device_control = edma_control; | 984 | dma->device_config = edma_slave_config; |
1021 | dma->device_slave_caps = edma_dma_device_slave_caps; | 985 | dma->device_pause = edma_dma_pause; |
986 | dma->device_resume = edma_dma_resume; | ||
987 | dma->device_terminate_all = edma_terminate_all; | ||
988 | |||
989 | dma->src_addr_widths = EDMA_DMA_BUSWIDTHS; | ||
990 | dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS; | ||
991 | dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
992 | dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
993 | |||
1022 | dma->dev = dev; | 994 | dma->dev = dev; |
1023 | 995 | ||
1024 | /* | 996 | /* |
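The edma hunk shows the other half of the capabilities migration: the per-driver edma_dma_device_slave_caps() callback is deleted and its contents become plain assignments on struct dma_device at init time. A minimal sketch of that registration-time pattern (FOO_BUSWIDTHS and foo_dma_init are placeholders):

#include <linux/dmaengine.h>

#define FOO_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
			 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static void foo_dma_init(struct dma_device *dma)
{
	/* static capability advertisement; the core's
	 * dma_get_slave_caps() reads these fields directly */
	dma->src_addr_widths = FOO_BUSWIDTHS;
	dma->dst_addr_widths = FOO_BUSWIDTHS;
	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
}

One side effect visible above: the old callback's misspelled dstn_addr_widths field goes away, along with the explicit cmd_pause/cmd_terminate booleans that the core now infers from the hook pointers.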
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 7650470196c4..24e5290faa32 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c | |||
@@ -144,7 +144,7 @@ struct ep93xx_dma_desc { | |||
144 | * @queue: pending descriptors which are handled next | 144 | * @queue: pending descriptors which are handled next |
145 | * @free_list: list of free descriptors which can be used | 145 | * @free_list: list of free descriptors which can be used |
146 | * @runtime_addr: physical address currently used as dest/src (M2M only). This | 146 | * @runtime_addr: physical address currently used as dest/src (M2M only). This |
147 | * is set via %DMA_SLAVE_CONFIG before slave operation is | 147 | * is set via .device_config before slave operation is |
148 | * prepared | 148 | * prepared |
149 | * @runtime_ctrl: M2M runtime values for the control register. | 149 | * @runtime_ctrl: M2M runtime values for the control register. |
150 | * | 150 | * |
@@ -1164,13 +1164,14 @@ fail: | |||
1164 | 1164 | ||
1165 | /** | 1165 | /** |
1166 | * ep93xx_dma_terminate_all - terminate all transactions | 1166 | * ep93xx_dma_terminate_all - terminate all transactions |
1167 | * @edmac: channel | 1167 | * @chan: channel |
1168 | * | 1168 | * |
1169 | * Stops all DMA transactions. All descriptors are put back to the | 1169 | * Stops all DMA transactions. All descriptors are put back to the |
1170 | * @edmac->free_list and callbacks are _not_ called. | 1170 | * @edmac->free_list and callbacks are _not_ called. |
1171 | */ | 1171 | */ |
1172 | static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac) | 1172 | static int ep93xx_dma_terminate_all(struct dma_chan *chan) |
1173 | { | 1173 | { |
1174 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
1174 | struct ep93xx_dma_desc *desc, *_d; | 1175 | struct ep93xx_dma_desc *desc, *_d; |
1175 | unsigned long flags; | 1176 | unsigned long flags; |
1176 | LIST_HEAD(list); | 1177 | LIST_HEAD(list); |
@@ -1194,9 +1195,10 @@ static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac) | |||
1194 | return 0; | 1195 | return 0; |
1195 | } | 1196 | } |
1196 | 1197 | ||
1197 | static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac, | 1198 | static int ep93xx_dma_slave_config(struct dma_chan *chan, |
1198 | struct dma_slave_config *config) | 1199 | struct dma_slave_config *config) |
1199 | { | 1200 | { |
1201 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
1200 | enum dma_slave_buswidth width; | 1202 | enum dma_slave_buswidth width; |
1201 | unsigned long flags; | 1203 | unsigned long flags; |
1202 | u32 addr, ctrl; | 1204 | u32 addr, ctrl; |
@@ -1242,36 +1244,6 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac, | |||
1242 | } | 1244 | } |
1243 | 1245 | ||
1244 | /** | 1246 | /** |
1245 | * ep93xx_dma_control - manipulate all pending operations on a channel | ||
1246 | * @chan: channel | ||
1247 | * @cmd: control command to perform | ||
1248 | * @arg: optional argument | ||
1249 | * | ||
1250 | * Controls the channel. Function returns %0 in case of success or negative | ||
1251 | * error in case of failure. | ||
1252 | */ | ||
1253 | static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
1254 | unsigned long arg) | ||
1255 | { | ||
1256 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
1257 | struct dma_slave_config *config; | ||
1258 | |||
1259 | switch (cmd) { | ||
1260 | case DMA_TERMINATE_ALL: | ||
1261 | return ep93xx_dma_terminate_all(edmac); | ||
1262 | |||
1263 | case DMA_SLAVE_CONFIG: | ||
1264 | config = (struct dma_slave_config *)arg; | ||
1265 | return ep93xx_dma_slave_config(edmac, config); | ||
1266 | |||
1267 | default: | ||
1268 | break; | ||
1269 | } | ||
1270 | |||
1271 | return -ENOSYS; | ||
1272 | } | ||
1273 | |||
1274 | /** | ||
1275 | * ep93xx_dma_tx_status - check if a transaction is completed | 1247 | * ep93xx_dma_tx_status - check if a transaction is completed |
1276 | * @chan: channel | 1248 | * @chan: channel |
1277 | * @cookie: transaction specific cookie | 1249 | * @cookie: transaction specific cookie |
@@ -1352,7 +1324,8 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev) | |||
1352 | dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources; | 1324 | dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources; |
1353 | dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; | 1325 | dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; |
1354 | dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; | 1326 | dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; |
1355 | dma_dev->device_control = ep93xx_dma_control; | 1327 | dma_dev->device_config = ep93xx_dma_slave_config; |
1328 | dma_dev->device_terminate_all = ep93xx_dma_terminate_all; | ||
1356 | dma_dev->device_issue_pending = ep93xx_dma_issue_pending; | 1329 | dma_dev->device_issue_pending = ep93xx_dma_issue_pending; |
1357 | dma_dev->device_tx_status = ep93xx_dma_tx_status; | 1330 | dma_dev->device_tx_status = ep93xx_dma_tx_status; |
1358 | 1331 | ||
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index e9ebb89e1711..09e2842d15ec 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c | |||
@@ -289,62 +289,69 @@ static void fsl_edma_free_desc(struct virt_dma_desc *vdesc) | |||
289 | kfree(fsl_desc); | 289 | kfree(fsl_desc); |
290 | } | 290 | } |
291 | 291 | ||
292 | static int fsl_edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 292 | static int fsl_edma_terminate_all(struct dma_chan *chan) |
293 | unsigned long arg) | ||
294 | { | 293 | { |
295 | struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); | 294 | struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); |
296 | struct dma_slave_config *cfg = (void *)arg; | ||
297 | unsigned long flags; | 295 | unsigned long flags; |
298 | LIST_HEAD(head); | 296 | LIST_HEAD(head); |
299 | 297 | ||
300 | switch (cmd) { | 298 | spin_lock_irqsave(&fsl_chan->vchan.lock, flags); |
301 | case DMA_TERMINATE_ALL: | 299 | fsl_edma_disable_request(fsl_chan); |
302 | spin_lock_irqsave(&fsl_chan->vchan.lock, flags); | 300 | fsl_chan->edesc = NULL; |
301 | vchan_get_all_descriptors(&fsl_chan->vchan, &head); | ||
302 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); | ||
303 | vchan_dma_desc_free_list(&fsl_chan->vchan, &head); | ||
304 | return 0; | ||
305 | } | ||
306 | |||
307 | static int fsl_edma_pause(struct dma_chan *chan) | ||
308 | { | ||
309 | struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); | ||
310 | unsigned long flags; | ||
311 | |||
312 | spin_lock_irqsave(&fsl_chan->vchan.lock, flags); | ||
313 | if (fsl_chan->edesc) { | ||
303 | fsl_edma_disable_request(fsl_chan); | 314 | fsl_edma_disable_request(fsl_chan); |
304 | fsl_chan->edesc = NULL; | 315 | fsl_chan->status = DMA_PAUSED; |
305 | vchan_get_all_descriptors(&fsl_chan->vchan, &head); | 316 | } |
306 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); | 317 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); |
307 | vchan_dma_desc_free_list(&fsl_chan->vchan, &head); | 318 | return 0; |
308 | return 0; | 319 | } |
309 | |||
310 | case DMA_SLAVE_CONFIG: | ||
311 | fsl_chan->fsc.dir = cfg->direction; | ||
312 | if (cfg->direction == DMA_DEV_TO_MEM) { | ||
313 | fsl_chan->fsc.dev_addr = cfg->src_addr; | ||
314 | fsl_chan->fsc.addr_width = cfg->src_addr_width; | ||
315 | fsl_chan->fsc.burst = cfg->src_maxburst; | ||
316 | fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width); | ||
317 | } else if (cfg->direction == DMA_MEM_TO_DEV) { | ||
318 | fsl_chan->fsc.dev_addr = cfg->dst_addr; | ||
319 | fsl_chan->fsc.addr_width = cfg->dst_addr_width; | ||
320 | fsl_chan->fsc.burst = cfg->dst_maxburst; | ||
321 | fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width); | ||
322 | } else { | ||
323 | return -EINVAL; | ||
324 | } | ||
325 | return 0; | ||
326 | 320 | ||
327 | case DMA_PAUSE: | 321 | static int fsl_edma_resume(struct dma_chan *chan) |
328 | spin_lock_irqsave(&fsl_chan->vchan.lock, flags); | 322 | { |
329 | if (fsl_chan->edesc) { | 323 | struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); |
330 | fsl_edma_disable_request(fsl_chan); | 324 | unsigned long flags; |
331 | fsl_chan->status = DMA_PAUSED; | ||
332 | } | ||
333 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); | ||
334 | return 0; | ||
335 | |||
336 | case DMA_RESUME: | ||
337 | spin_lock_irqsave(&fsl_chan->vchan.lock, flags); | ||
338 | if (fsl_chan->edesc) { | ||
339 | fsl_edma_enable_request(fsl_chan); | ||
340 | fsl_chan->status = DMA_IN_PROGRESS; | ||
341 | } | ||
342 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); | ||
343 | return 0; | ||
344 | 325 | ||
345 | default: | 326 | spin_lock_irqsave(&fsl_chan->vchan.lock, flags); |
346 | return -ENXIO; | 327 | if (fsl_chan->edesc) { |
328 | fsl_edma_enable_request(fsl_chan); | ||
329 | fsl_chan->status = DMA_IN_PROGRESS; | ||
330 | } | ||
331 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); | ||
332 | return 0; | ||
333 | } | ||
334 | |||
335 | static int fsl_edma_slave_config(struct dma_chan *chan, | ||
336 | struct dma_slave_config *cfg) | ||
337 | { | ||
338 | struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); | ||
339 | |||
340 | fsl_chan->fsc.dir = cfg->direction; | ||
341 | if (cfg->direction == DMA_DEV_TO_MEM) { | ||
342 | fsl_chan->fsc.dev_addr = cfg->src_addr; | ||
343 | fsl_chan->fsc.addr_width = cfg->src_addr_width; | ||
344 | fsl_chan->fsc.burst = cfg->src_maxburst; | ||
345 | fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width); | ||
346 | } else if (cfg->direction == DMA_MEM_TO_DEV) { | ||
347 | fsl_chan->fsc.dev_addr = cfg->dst_addr; | ||
348 | fsl_chan->fsc.addr_width = cfg->dst_addr_width; | ||
349 | fsl_chan->fsc.burst = cfg->dst_maxburst; | ||
350 | fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width); | ||
351 | } else { | ||
352 | return -EINVAL; | ||
347 | } | 353 | } |
354 | return 0; | ||
348 | } | 355 | } |
349 | 356 | ||
350 | static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan, | 357 | static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan, |
@@ -780,18 +787,6 @@ static void fsl_edma_free_chan_resources(struct dma_chan *chan) | |||
780 | fsl_chan->tcd_pool = NULL; | 787 | fsl_chan->tcd_pool = NULL; |
781 | } | 788 | } |
782 | 789 | ||
783 | static int fsl_dma_device_slave_caps(struct dma_chan *dchan, | ||
784 | struct dma_slave_caps *caps) | ||
785 | { | ||
786 | caps->src_addr_widths = FSL_EDMA_BUSWIDTHS; | ||
787 | caps->dstn_addr_widths = FSL_EDMA_BUSWIDTHS; | ||
788 | caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
789 | caps->cmd_pause = true; | ||
790 | caps->cmd_terminate = true; | ||
791 | |||
792 | return 0; | ||
793 | } | ||
794 | |||
795 | static int | 790 | static int |
796 | fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma) | 791 | fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma) |
797 | { | 792 | { |
@@ -917,9 +912,15 @@ static int fsl_edma_probe(struct platform_device *pdev) | |||
917 | fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status; | 912 | fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status; |
918 | fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg; | 913 | fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg; |
919 | fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic; | 914 | fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic; |
920 | fsl_edma->dma_dev.device_control = fsl_edma_control; | 915 | fsl_edma->dma_dev.device_config = fsl_edma_slave_config; |
916 | fsl_edma->dma_dev.device_pause = fsl_edma_pause; | ||
917 | fsl_edma->dma_dev.device_resume = fsl_edma_resume; | ||
918 | fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all; | ||
921 | fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending; | 919 | fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending; |
922 | fsl_edma->dma_dev.device_slave_caps = fsl_dma_device_slave_caps; | 920 | |
921 | fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS; | ||
922 | fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS; | ||
923 | fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
923 | 924 | ||
924 | platform_set_drvdata(pdev, fsl_edma); | 925 | platform_set_drvdata(pdev, fsl_edma); |
925 | 926 | ||
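fsl-edma's single control function unbundles into four hooks, each keeping the vchan.lock discipline of the original switch arms. The termination arm follows the standard virt-dma shape: collect descriptors under the lock, free them after dropping it. A sketch under the assumption of a driver-private foo_chan embedding a struct virt_dma_chan (virt-dma.h is the in-tree drivers/dma helper header):

#include <linux/dmaengine.h>
#include "virt-dma.h"

struct foo_chan {
	struct virt_dma_chan vchan;
};

static int foo_terminate_all(struct dma_chan *chan)
{
	struct foo_chan *fc = container_of(to_virt_chan(chan),
					   struct foo_chan, vchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fc->vchan.lock, flags);
	/* stop the hardware request line here */
	vchan_get_all_descriptors(&fc->vchan, &head);
	spin_unlock_irqrestore(&fc->vchan.lock, flags);
	/* descriptor callbacks must not run under the lock */
	vchan_dma_desc_free_list(&fc->vchan, &head);

	return 0;
}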
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 38821cdf862b..300f821f1890 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -941,84 +941,56 @@ fail: | |||
941 | return NULL; | 941 | return NULL; |
942 | } | 942 | } |
943 | 943 | ||
944 | /** | 944 | static int fsl_dma_device_terminate_all(struct dma_chan *dchan) |
945 | * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction | ||
946 | * @chan: DMA channel | ||
947 | * @sgl: scatterlist to transfer to/from | ||
948 | * @sg_len: number of entries in @scatterlist | ||
949 | * @direction: DMA direction | ||
950 | * @flags: DMAEngine flags | ||
951 | * @context: transaction context (ignored) | ||
952 | * | ||
953 | * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the | ||
954 | * DMA_SLAVE API, this gets the device-specific information from the | ||
955 | * chan->private variable. | ||
956 | */ | ||
957 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( | ||
958 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, | ||
959 | enum dma_transfer_direction direction, unsigned long flags, | ||
960 | void *context) | ||
961 | { | 945 | { |
962 | /* | ||
963 | * This operation is not supported on the Freescale DMA controller | ||
964 | * | ||
965 | * However, we need to provide the function pointer to allow the | ||
966 | * device_control() method to work. | ||
967 | */ | ||
968 | return NULL; | ||
969 | } | ||
970 | |||
971 | static int fsl_dma_device_control(struct dma_chan *dchan, | ||
972 | enum dma_ctrl_cmd cmd, unsigned long arg) | ||
973 | { | ||
974 | struct dma_slave_config *config; | ||
975 | struct fsldma_chan *chan; | 946 | struct fsldma_chan *chan; |
976 | int size; | ||
977 | 947 | ||
978 | if (!dchan) | 948 | if (!dchan) |
979 | return -EINVAL; | 949 | return -EINVAL; |
980 | 950 | ||
981 | chan = to_fsl_chan(dchan); | 951 | chan = to_fsl_chan(dchan); |
982 | 952 | ||
983 | switch (cmd) { | 953 | spin_lock_bh(&chan->desc_lock); |
984 | case DMA_TERMINATE_ALL: | ||
985 | spin_lock_bh(&chan->desc_lock); | ||
986 | |||
987 | /* Halt the DMA engine */ | ||
988 | dma_halt(chan); | ||
989 | 954 | ||
990 | /* Remove and free all of the descriptors in the LD queue */ | 955 | /* Halt the DMA engine */ |
991 | fsldma_free_desc_list(chan, &chan->ld_pending); | 956 | dma_halt(chan); |
992 | fsldma_free_desc_list(chan, &chan->ld_running); | ||
993 | fsldma_free_desc_list(chan, &chan->ld_completed); | ||
994 | chan->idle = true; | ||
995 | 957 | ||
996 | spin_unlock_bh(&chan->desc_lock); | 958 | /* Remove and free all of the descriptors in the LD queue */ |
997 | return 0; | 959 | fsldma_free_desc_list(chan, &chan->ld_pending); |
960 | fsldma_free_desc_list(chan, &chan->ld_running); | ||
961 | fsldma_free_desc_list(chan, &chan->ld_completed); | ||
962 | chan->idle = true; | ||
998 | 963 | ||
999 | case DMA_SLAVE_CONFIG: | 964 | spin_unlock_bh(&chan->desc_lock); |
1000 | config = (struct dma_slave_config *)arg; | 965 | return 0; |
966 | } | ||
1001 | 967 | ||
1002 | /* make sure the channel supports setting burst size */ | 968 | static int fsl_dma_device_config(struct dma_chan *dchan, |
1003 | if (!chan->set_request_count) | 969 | struct dma_slave_config *config) |
1004 | return -ENXIO; | 970 | { |
971 | struct fsldma_chan *chan; | ||
972 | int size; | ||
1005 | 973 | ||
1006 | /* we set the controller burst size depending on direction */ | 974 | if (!dchan) |
1007 | if (config->direction == DMA_MEM_TO_DEV) | 975 | return -EINVAL; |
1008 | size = config->dst_addr_width * config->dst_maxburst; | ||
1009 | else | ||
1010 | size = config->src_addr_width * config->src_maxburst; | ||
1011 | 976 | ||
1012 | chan->set_request_count(chan, size); | 977 | chan = to_fsl_chan(dchan); |
1013 | return 0; | ||
1014 | 978 | ||
1015 | default: | 979 | /* make sure the channel supports setting burst size */ |
980 | if (!chan->set_request_count) | ||
1016 | return -ENXIO; | 981 | return -ENXIO; |
1017 | } | ||
1018 | 982 | ||
983 | /* we set the controller burst size depending on direction */ | ||
984 | if (config->direction == DMA_MEM_TO_DEV) | ||
985 | size = config->dst_addr_width * config->dst_maxburst; | ||
986 | else | ||
987 | size = config->src_addr_width * config->src_maxburst; | ||
988 | |||
989 | chan->set_request_count(chan, size); | ||
1019 | return 0; | 990 | return 0; |
1020 | } | 991 | } |
1021 | 992 | ||
993 | |||
1022 | /** | 994 | /** |
1023 | * fsl_dma_memcpy_issue_pending - Issue the DMA start command | 995 | * fsl_dma_memcpy_issue_pending - Issue the DMA start command |
1024 | * @chan : Freescale DMA channel | 996 | * @chan : Freescale DMA channel |
@@ -1395,10 +1367,15 @@ static int fsldma_of_probe(struct platform_device *op) | |||
1395 | fdev->common.device_prep_dma_sg = fsl_dma_prep_sg; | 1367 | fdev->common.device_prep_dma_sg = fsl_dma_prep_sg; |
1396 | fdev->common.device_tx_status = fsl_tx_status; | 1368 | fdev->common.device_tx_status = fsl_tx_status; |
1397 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; | 1369 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; |
1398 | fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; | 1370 | fdev->common.device_config = fsl_dma_device_config; |
1399 | fdev->common.device_control = fsl_dma_device_control; | 1371 | fdev->common.device_terminate_all = fsl_dma_device_terminate_all; |
1400 | fdev->common.dev = &op->dev; | 1372 | fdev->common.dev = &op->dev; |
1401 | 1373 | ||
1374 | fdev->common.src_addr_widths = FSL_DMA_BUSWIDTHS; | ||
1375 | fdev->common.dst_addr_widths = FSL_DMA_BUSWIDTHS; | ||
1376 | fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
1377 | fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; | ||
1378 | |||
1402 | dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); | 1379 | dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); |
1403 | 1380 | ||
1404 | platform_set_drvdata(op, fdev); | 1381 | platform_set_drvdata(op, fdev); |
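Beyond the mechanical split, fsldma also drops fsl_dma_prep_slave_sg() entirely: it was a NULL-returning stub kept only to satisfy the old device_control() requirement, and with the new optional hooks (the BUG_ON for device_control is gone from dma_async_device_register() above) no placeholder is needed. The surviving config logic sizes the controller burst from whichever side the transfer faces; an illustrative client call, with made-up values:

#include <linux/dmaengine.h>

/* For MEM_TO_DEV, fsl_dma_device_config() computes the request
 * count as dst_addr_width * dst_maxburst = 4 * 16 = 64 bytes.
 * Note it still keys off cfg.direction, even though that field
 * is deprecated in favour of the prep_* direction argument. */
struct dma_slave_config cfg = {
	.direction	= DMA_MEM_TO_DEV,
	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	.dst_maxburst	= 16,
};

ret = dmaengine_slave_config(chan, &cfg);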
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 239c20c84382..31bffccdcc75 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h | |||
@@ -83,6 +83,10 @@ | |||
83 | #define FSL_DMA_DGSR_EOSI 0x02 | 83 | #define FSL_DMA_DGSR_EOSI 0x02 |
84 | #define FSL_DMA_DGSR_EOLSI 0x01 | 84 | #define FSL_DMA_DGSR_EOLSI 0x01 |
85 | 85 | ||
86 | #define FSL_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | ||
87 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | ||
88 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ | ||
89 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) | ||
86 | typedef u64 __bitwise v64; | 90 | typedef u64 __bitwise v64; |
87 | typedef u32 __bitwise v32; | 91 | typedef u32 __bitwise v32; |
88 | 92 | ||
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 0c4d35da1502..eed405976ea9 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -664,69 +664,67 @@ out: | |||
664 | 664 | ||
665 | } | 665 | } |
666 | 666 | ||
667 | static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 667 | static int imxdma_terminate_all(struct dma_chan *chan) |
668 | unsigned long arg) | ||
669 | { | 668 | { |
670 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 669 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
671 | struct dma_slave_config *dmaengine_cfg = (void *)arg; | ||
672 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 670 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
673 | unsigned long flags; | 671 | unsigned long flags; |
674 | unsigned int mode = 0; | ||
675 | |||
676 | switch (cmd) { | ||
677 | case DMA_TERMINATE_ALL: | ||
678 | imxdma_disable_hw(imxdmac); | ||
679 | 672 | ||
680 | spin_lock_irqsave(&imxdma->lock, flags); | 673 | imxdma_disable_hw(imxdmac); |
681 | list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free); | ||
682 | list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free); | ||
683 | spin_unlock_irqrestore(&imxdma->lock, flags); | ||
684 | return 0; | ||
685 | case DMA_SLAVE_CONFIG: | ||
686 | if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { | ||
687 | imxdmac->per_address = dmaengine_cfg->src_addr; | ||
688 | imxdmac->watermark_level = dmaengine_cfg->src_maxburst; | ||
689 | imxdmac->word_size = dmaengine_cfg->src_addr_width; | ||
690 | } else { | ||
691 | imxdmac->per_address = dmaengine_cfg->dst_addr; | ||
692 | imxdmac->watermark_level = dmaengine_cfg->dst_maxburst; | ||
693 | imxdmac->word_size = dmaengine_cfg->dst_addr_width; | ||
694 | } | ||
695 | |||
696 | switch (imxdmac->word_size) { | ||
697 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
698 | mode = IMX_DMA_MEMSIZE_8; | ||
699 | break; | ||
700 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
701 | mode = IMX_DMA_MEMSIZE_16; | ||
702 | break; | ||
703 | default: | ||
704 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
705 | mode = IMX_DMA_MEMSIZE_32; | ||
706 | break; | ||
707 | } | ||
708 | 674 | ||
709 | imxdmac->hw_chaining = 0; | 675 | spin_lock_irqsave(&imxdma->lock, flags); |
676 | list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free); | ||
677 | list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free); | ||
678 | spin_unlock_irqrestore(&imxdma->lock, flags); | ||
679 | return 0; | ||
680 | } | ||
710 | 681 | ||
711 | imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) | | 682 | static int imxdma_config(struct dma_chan *chan, |
712 | ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) | | 683 | struct dma_slave_config *dmaengine_cfg) |
713 | CCR_REN; | 684 | { |
714 | imxdmac->ccr_to_device = | 685 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
715 | (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) | | 686 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
716 | ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN; | 687 | unsigned int mode = 0; |
717 | imx_dmav1_writel(imxdma, imxdmac->dma_request, | ||
718 | DMA_RSSR(imxdmac->channel)); | ||
719 | 688 | ||
720 | /* Set burst length */ | 689 | if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { |
721 | imx_dmav1_writel(imxdma, imxdmac->watermark_level * | 690 | imxdmac->per_address = dmaengine_cfg->src_addr; |
722 | imxdmac->word_size, DMA_BLR(imxdmac->channel)); | 691 | imxdmac->watermark_level = dmaengine_cfg->src_maxburst; |
692 | imxdmac->word_size = dmaengine_cfg->src_addr_width; | ||
693 | } else { | ||
694 | imxdmac->per_address = dmaengine_cfg->dst_addr; | ||
695 | imxdmac->watermark_level = dmaengine_cfg->dst_maxburst; | ||
696 | imxdmac->word_size = dmaengine_cfg->dst_addr_width; | ||
697 | } | ||
723 | 698 | ||
724 | return 0; | 699 | switch (imxdmac->word_size) { |
700 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
701 | mode = IMX_DMA_MEMSIZE_8; | ||
702 | break; | ||
703 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
704 | mode = IMX_DMA_MEMSIZE_16; | ||
705 | break; | ||
725 | default: | 706 | default: |
726 | return -ENOSYS; | 707 | case DMA_SLAVE_BUSWIDTH_4_BYTES: |
708 | mode = IMX_DMA_MEMSIZE_32; | ||
709 | break; | ||
727 | } | 710 | } |
728 | 711 | ||
729 | return -EINVAL; | 712 | imxdmac->hw_chaining = 0; |
713 | |||
714 | imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) | | ||
715 | ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) | | ||
716 | CCR_REN; | ||
717 | imxdmac->ccr_to_device = | ||
718 | (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) | | ||
719 | ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN; | ||
720 | imx_dmav1_writel(imxdma, imxdmac->dma_request, | ||
721 | DMA_RSSR(imxdmac->channel)); | ||
722 | |||
723 | /* Set burst length */ | ||
724 | imx_dmav1_writel(imxdma, imxdmac->watermark_level * | ||
725 | imxdmac->word_size, DMA_BLR(imxdmac->channel)); | ||
726 | |||
727 | return 0; | ||
730 | } | 728 | } |
731 | 729 | ||
732 | static enum dma_status imxdma_tx_status(struct dma_chan *chan, | 730 | static enum dma_status imxdma_tx_status(struct dma_chan *chan, |
@@ -1179,7 +1177,8 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
1179 | imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic; | 1177 | imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic; |
1180 | imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy; | 1178 | imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy; |
1181 | imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved; | 1179 | imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved; |
1182 | imxdma->dma_device.device_control = imxdma_control; | 1180 | imxdma->dma_device.device_config = imxdma_config; |
1181 | imxdma->dma_device.device_terminate_all = imxdma_terminate_all; | ||
1183 | imxdma->dma_device.device_issue_pending = imxdma_issue_pending; | 1182 | imxdma->dma_device.device_issue_pending = imxdma_issue_pending; |
1184 | 1183 | ||
1185 | platform_set_drvdata(pdev, imxdma); | 1184 | platform_set_drvdata(pdev, imxdma); |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index d2432c90a566..18c0a131e4e4 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -830,20 +830,29 @@ static int sdma_load_context(struct sdma_channel *sdmac) | |||
830 | return ret; | 830 | return ret; |
831 | } | 831 | } |
832 | 832 | ||
833 | static void sdma_disable_channel(struct sdma_channel *sdmac) | 833 | static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) |
834 | { | ||
835 | return container_of(chan, struct sdma_channel, chan); | ||
836 | } | ||
837 | |||
838 | static int sdma_disable_channel(struct dma_chan *chan) | ||
834 | { | 839 | { |
840 | struct sdma_channel *sdmac = to_sdma_chan(chan); | ||
835 | struct sdma_engine *sdma = sdmac->sdma; | 841 | struct sdma_engine *sdma = sdmac->sdma; |
836 | int channel = sdmac->channel; | 842 | int channel = sdmac->channel; |
837 | 843 | ||
838 | writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP); | 844 | writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP); |
839 | sdmac->status = DMA_ERROR; | 845 | sdmac->status = DMA_ERROR; |
846 | |||
847 | return 0; | ||
840 | } | 848 | } |
841 | 849 | ||
842 | static int sdma_config_channel(struct sdma_channel *sdmac) | 850 | static int sdma_config_channel(struct dma_chan *chan) |
843 | { | 851 | { |
852 | struct sdma_channel *sdmac = to_sdma_chan(chan); | ||
844 | int ret; | 853 | int ret; |
845 | 854 | ||
846 | sdma_disable_channel(sdmac); | 855 | sdma_disable_channel(chan); |
847 | 856 | ||
848 | sdmac->event_mask[0] = 0; | 857 | sdmac->event_mask[0] = 0; |
849 | sdmac->event_mask[1] = 0; | 858 | sdmac->event_mask[1] = 0; |
@@ -935,11 +944,6 @@ out: | |||
935 | return ret; | 944 | return ret; |
936 | } | 945 | } |
937 | 946 | ||
938 | static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) | ||
939 | { | ||
940 | return container_of(chan, struct sdma_channel, chan); | ||
941 | } | ||
942 | |||
943 | static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) | 947 | static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) |
944 | { | 948 | { |
945 | unsigned long flags; | 949 | unsigned long flags; |
@@ -1004,7 +1008,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan) | |||
1004 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 1008 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
1005 | struct sdma_engine *sdma = sdmac->sdma; | 1009 | struct sdma_engine *sdma = sdmac->sdma; |
1006 | 1010 | ||
1007 | sdma_disable_channel(sdmac); | 1011 | sdma_disable_channel(chan); |
1008 | 1012 | ||
1009 | if (sdmac->event_id0) | 1013 | if (sdmac->event_id0) |
1010 | sdma_event_disable(sdmac, sdmac->event_id0); | 1014 | sdma_event_disable(sdmac, sdmac->event_id0); |
@@ -1203,35 +1207,24 @@ err_out: | |||
1203 | return NULL; | 1207 | return NULL; |
1204 | } | 1208 | } |
1205 | 1209 | ||
1206 | static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 1210 | static int sdma_config(struct dma_chan *chan, |
1207 | unsigned long arg) | 1211 | struct dma_slave_config *dmaengine_cfg) |
1208 | { | 1212 | { |
1209 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 1213 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
1210 | struct dma_slave_config *dmaengine_cfg = (void *)arg; | ||
1211 | |||
1212 | switch (cmd) { | ||
1213 | case DMA_TERMINATE_ALL: | ||
1214 | sdma_disable_channel(sdmac); | ||
1215 | return 0; | ||
1216 | case DMA_SLAVE_CONFIG: | ||
1217 | if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { | ||
1218 | sdmac->per_address = dmaengine_cfg->src_addr; | ||
1219 | sdmac->watermark_level = dmaengine_cfg->src_maxburst * | ||
1220 | dmaengine_cfg->src_addr_width; | ||
1221 | sdmac->word_size = dmaengine_cfg->src_addr_width; | ||
1222 | } else { | ||
1223 | sdmac->per_address = dmaengine_cfg->dst_addr; | ||
1224 | sdmac->watermark_level = dmaengine_cfg->dst_maxburst * | ||
1225 | dmaengine_cfg->dst_addr_width; | ||
1226 | sdmac->word_size = dmaengine_cfg->dst_addr_width; | ||
1227 | } | ||
1228 | sdmac->direction = dmaengine_cfg->direction; | ||
1229 | return sdma_config_channel(sdmac); | ||
1230 | default: | ||
1231 | return -ENOSYS; | ||
1232 | } | ||
1233 | 1214 | ||
1234 | return -EINVAL; | 1215 | if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { |
1216 | sdmac->per_address = dmaengine_cfg->src_addr; | ||
1217 | sdmac->watermark_level = dmaengine_cfg->src_maxburst * | ||
1218 | dmaengine_cfg->src_addr_width; | ||
1219 | sdmac->word_size = dmaengine_cfg->src_addr_width; | ||
1220 | } else { | ||
1221 | sdmac->per_address = dmaengine_cfg->dst_addr; | ||
1222 | sdmac->watermark_level = dmaengine_cfg->dst_maxburst * | ||
1223 | dmaengine_cfg->dst_addr_width; | ||
1224 | sdmac->word_size = dmaengine_cfg->dst_addr_width; | ||
1225 | } | ||
1226 | sdmac->direction = dmaengine_cfg->direction; | ||
1227 | return sdma_config_channel(chan); | ||
1235 | } | 1228 | } |
1236 | 1229 | ||
1237 | static enum dma_status sdma_tx_status(struct dma_chan *chan, | 1230 | static enum dma_status sdma_tx_status(struct dma_chan *chan, |
@@ -1479,7 +1472,7 @@ static int sdma_probe(struct platform_device *pdev) | |||
1479 | if (ret) | 1472 | if (ret) |
1480 | return ret; | 1473 | return ret; |
1481 | 1474 | ||
1482 | sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); | 1475 | sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL); |
1483 | if (!sdma) | 1476 | if (!sdma) |
1484 | return -ENOMEM; | 1477 | return -ENOMEM; |
1485 | 1478 | ||
@@ -1488,48 +1481,34 @@ static int sdma_probe(struct platform_device *pdev) | |||
1488 | sdma->dev = &pdev->dev; | 1481 | sdma->dev = &pdev->dev; |
1489 | sdma->drvdata = drvdata; | 1482 | sdma->drvdata = drvdata; |
1490 | 1483 | ||
1491 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1492 | irq = platform_get_irq(pdev, 0); | 1484 | irq = platform_get_irq(pdev, 0); |
1493 | if (!iores || irq < 0) { | 1485 | if (irq < 0) |
1494 | ret = -EINVAL; | 1486 | return irq; |
1495 | goto err_irq; | ||
1496 | } | ||
1497 | 1487 | ||
1498 | if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) { | 1488 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1499 | ret = -EBUSY; | 1489 | sdma->regs = devm_ioremap_resource(&pdev->dev, iores); |
1500 | goto err_request_region; | 1490 | if (IS_ERR(sdma->regs)) |
1501 | } | 1491 | return PTR_ERR(sdma->regs); |
1502 | 1492 | ||
1503 | sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); | 1493 | sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); |
1504 | if (IS_ERR(sdma->clk_ipg)) { | 1494 | if (IS_ERR(sdma->clk_ipg)) |
1505 | ret = PTR_ERR(sdma->clk_ipg); | 1495 | return PTR_ERR(sdma->clk_ipg); |
1506 | goto err_clk; | ||
1507 | } | ||
1508 | 1496 | ||
1509 | sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); | 1497 | sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); |
1510 | if (IS_ERR(sdma->clk_ahb)) { | 1498 | if (IS_ERR(sdma->clk_ahb)) |
1511 | ret = PTR_ERR(sdma->clk_ahb); | 1499 | return PTR_ERR(sdma->clk_ahb); |
1512 | goto err_clk; | ||
1513 | } | ||
1514 | 1500 | ||
1515 | clk_prepare(sdma->clk_ipg); | 1501 | clk_prepare(sdma->clk_ipg); |
1516 | clk_prepare(sdma->clk_ahb); | 1502 | clk_prepare(sdma->clk_ahb); |
1517 | 1503 | ||
1518 | sdma->regs = ioremap(iores->start, resource_size(iores)); | 1504 | ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma", |
1519 | if (!sdma->regs) { | 1505 | sdma); |
1520 | ret = -ENOMEM; | ||
1521 | goto err_ioremap; | ||
1522 | } | ||
1523 | |||
1524 | ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma); | ||
1525 | if (ret) | 1506 | if (ret) |
1526 | goto err_request_irq; | 1507 | return ret; |
1527 | 1508 | ||
1528 | sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); | 1509 | sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); |
1529 | if (!sdma->script_addrs) { | 1510 | if (!sdma->script_addrs) |
1530 | ret = -ENOMEM; | 1511 | return -ENOMEM; |
1531 | goto err_alloc; | ||
1532 | } | ||
1533 | 1512 | ||
1534 | /* initially no scripts available */ | 1513 | /* initially no scripts available */ |
1535 | saddr_arr = (s32 *)sdma->script_addrs; | 1514 | saddr_arr = (s32 *)sdma->script_addrs; |
@@ -1600,7 +1579,12 @@ static int sdma_probe(struct platform_device *pdev) | |||
1600 | sdma->dma_device.device_tx_status = sdma_tx_status; | 1579 | sdma->dma_device.device_tx_status = sdma_tx_status; |
1601 | sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; | 1580 | sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; |
1602 | sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; | 1581 | sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; |
1603 | sdma->dma_device.device_control = sdma_control; | 1582 | sdma->dma_device.device_config = sdma_config; |
1583 | sdma->dma_device.device_terminate_all = sdma_disable_channel; | ||
1584 | sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | ||
1585 | sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | ||
1586 | sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
1587 | sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
1604 | sdma->dma_device.device_issue_pending = sdma_issue_pending; | 1588 | sdma->dma_device.device_issue_pending = sdma_issue_pending; |
1605 | sdma->dma_device.dev->dma_parms = &sdma->dma_parms; | 1589 | sdma->dma_device.dev->dma_parms = &sdma->dma_parms; |
1606 | dma_set_max_seg_size(sdma->dma_device.dev, 65535); | 1590 | dma_set_max_seg_size(sdma->dma_device.dev, 65535); |
@@ -1629,38 +1613,22 @@ err_register: | |||
1629 | dma_async_device_unregister(&sdma->dma_device); | 1613 | dma_async_device_unregister(&sdma->dma_device); |
1630 | err_init: | 1614 | err_init: |
1631 | kfree(sdma->script_addrs); | 1615 | kfree(sdma->script_addrs); |
1632 | err_alloc: | ||
1633 | free_irq(irq, sdma); | ||
1634 | err_request_irq: | ||
1635 | iounmap(sdma->regs); | ||
1636 | err_ioremap: | ||
1637 | err_clk: | ||
1638 | release_mem_region(iores->start, resource_size(iores)); | ||
1639 | err_request_region: | ||
1640 | err_irq: | ||
1641 | kfree(sdma); | ||
1642 | return ret; | 1616 | return ret; |
1643 | } | 1617 | } |
1644 | 1618 | ||
1645 | static int sdma_remove(struct platform_device *pdev) | 1619 | static int sdma_remove(struct platform_device *pdev) |
1646 | { | 1620 | { |
1647 | struct sdma_engine *sdma = platform_get_drvdata(pdev); | 1621 | struct sdma_engine *sdma = platform_get_drvdata(pdev); |
1648 | struct resource *iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1649 | int irq = platform_get_irq(pdev, 0); | ||
1650 | int i; | 1622 | int i; |
1651 | 1623 | ||
1652 | dma_async_device_unregister(&sdma->dma_device); | 1624 | dma_async_device_unregister(&sdma->dma_device); |
1653 | kfree(sdma->script_addrs); | 1625 | kfree(sdma->script_addrs); |
1654 | free_irq(irq, sdma); | ||
1655 | iounmap(sdma->regs); | ||
1656 | release_mem_region(iores->start, resource_size(iores)); | ||
1657 | /* Kill the tasklet */ | 1626 | /* Kill the tasklet */ |
1658 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { | 1627 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { |
1659 | struct sdma_channel *sdmac = &sdma->channel[i]; | 1628 | struct sdma_channel *sdmac = &sdma->channel[i]; |
1660 | 1629 | ||
1661 | tasklet_kill(&sdmac->tasklet); | 1630 | tasklet_kill(&sdmac->tasklet); |
1662 | } | 1631 | } |
1663 | kfree(sdma); | ||
1664 | 1632 | ||
1665 | platform_set_drvdata(pdev, NULL); | 1633 | platform_set_drvdata(pdev, NULL); |
1666 | dev_info(&pdev->dev, "Removed...\n"); | 1634 | dev_info(&pdev->dev, "Removed...\n"); |
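The imx-sdma hunk above is representative of the capability side of this series: the driver now publishes its supported bus widths, directions and residue granularity directly on struct dma_device. As a rough illustrative sketch (not taken from this patch set), a client could then query those capabilities through dma_get_slave_caps() before committing to a channel:

	#include <linux/dmaengine.h>

	/*
	 * Illustrative only: reject a channel whose advertised
	 * capabilities do not cover a 4-byte device-to-memory transfer.
	 */
	static int check_chan_caps(struct dma_chan *chan)
	{
		struct dma_slave_caps caps;
		int ret;

		ret = dma_get_slave_caps(chan, &caps);
		if (ret)
			return ret;

		if (!(caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) ||
		    !(caps.directions & BIT(DMA_DEV_TO_MEM)))
			return -EINVAL;

		return 0;
	}

With the fields filled in at probe time as above, the core can answer this query generically instead of each driver implementing its own device_slave_caps callback.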
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index 1aab8130efa1..5aaead9b56f7 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c | |||
@@ -492,10 +492,10 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, | |||
492 | return ret; | 492 | return ret; |
493 | } | 493 | } |
494 | 494 | ||
495 | static int dma_slave_control(struct dma_chan *chan, unsigned long arg) | 495 | static int intel_mid_dma_config(struct dma_chan *chan, |
496 | struct dma_slave_config *slave) | ||
496 | { | 497 | { |
497 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | 498 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); |
498 | struct dma_slave_config *slave = (struct dma_slave_config *)arg; | ||
499 | struct intel_mid_dma_slave *mid_slave; | 499 | struct intel_mid_dma_slave *mid_slave; |
500 | 500 | ||
501 | BUG_ON(!midc); | 501 | BUG_ON(!midc); |
@@ -509,28 +509,14 @@ static int dma_slave_control(struct dma_chan *chan, unsigned long arg) | |||
509 | midc->mid_slave = mid_slave; | 509 | midc->mid_slave = mid_slave; |
510 | return 0; | 510 | return 0; |
511 | } | 511 | } |
512 | /** | 512 | |
513 | * intel_mid_dma_device_control - DMA device control | 513 | static int intel_mid_dma_terminate_all(struct dma_chan *chan) |
514 | * @chan: chan for DMA control | ||
515 | * @cmd: control cmd | ||
516 | * @arg: cmd arg value | ||
517 | * | ||
518 | * Perform DMA control command | ||
519 | */ | ||
520 | static int intel_mid_dma_device_control(struct dma_chan *chan, | ||
521 | enum dma_ctrl_cmd cmd, unsigned long arg) | ||
522 | { | 514 | { |
523 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | 515 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); |
524 | struct middma_device *mid = to_middma_device(chan->device); | 516 | struct middma_device *mid = to_middma_device(chan->device); |
525 | struct intel_mid_dma_desc *desc, *_desc; | 517 | struct intel_mid_dma_desc *desc, *_desc; |
526 | union intel_mid_dma_cfg_lo cfg_lo; | 518 | union intel_mid_dma_cfg_lo cfg_lo; |
527 | 519 | ||
528 | if (cmd == DMA_SLAVE_CONFIG) | ||
529 | return dma_slave_control(chan, arg); | ||
530 | |||
531 | if (cmd != DMA_TERMINATE_ALL) | ||
532 | return -ENXIO; | ||
533 | |||
534 | spin_lock_bh(&midc->lock); | 520 | spin_lock_bh(&midc->lock); |
535 | if (midc->busy == false) { | 521 | if (midc->busy == false) { |
536 | spin_unlock_bh(&midc->lock); | 522 | spin_unlock_bh(&midc->lock); |
@@ -1148,7 +1134,8 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
1148 | dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy; | 1134 | dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy; |
1149 | dma->common.device_issue_pending = intel_mid_dma_issue_pending; | 1135 | dma->common.device_issue_pending = intel_mid_dma_issue_pending; |
1150 | dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg; | 1136 | dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg; |
1151 | dma->common.device_control = intel_mid_dma_device_control; | 1137 | dma->common.device_config = intel_mid_dma_config; |
1138 | dma->common.device_terminate_all = intel_mid_dma_terminate_all; | ||
1152 | 1139 | ||
1153 | /*enable dma cntrl*/ | 1140 | /*enable dma cntrl*/ |
1154 | iowrite32(REG_BIT0, dma->dma_base + DMA_CFG); | 1141 | iowrite32(REG_BIT0, dma->dma_base + DMA_CFG); |
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index c2b017ad139d..b54f62de9232 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -1398,76 +1398,81 @@ static void idmac_issue_pending(struct dma_chan *chan) | |||
1398 | */ | 1398 | */ |
1399 | } | 1399 | } |
1400 | 1400 | ||
1401 | static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 1401 | static int idmac_pause(struct dma_chan *chan) |
1402 | unsigned long arg) | ||
1403 | { | 1402 | { |
1404 | struct idmac_channel *ichan = to_idmac_chan(chan); | 1403 | struct idmac_channel *ichan = to_idmac_chan(chan); |
1405 | struct idmac *idmac = to_idmac(chan->device); | 1404 | struct idmac *idmac = to_idmac(chan->device); |
1406 | struct ipu *ipu = to_ipu(idmac); | 1405 | struct ipu *ipu = to_ipu(idmac); |
1407 | struct list_head *list, *tmp; | 1406 | struct list_head *list, *tmp; |
1408 | unsigned long flags; | 1407 | unsigned long flags; |
1409 | int i; | ||
1410 | 1408 | ||
1411 | switch (cmd) { | 1409 | mutex_lock(&ichan->chan_mutex); |
1412 | case DMA_PAUSE: | ||
1413 | spin_lock_irqsave(&ipu->lock, flags); | ||
1414 | ipu_ic_disable_task(ipu, chan->chan_id); | ||
1415 | 1410 | ||
1416 | /* Return all descriptors into "prepared" state */ | 1411 | spin_lock_irqsave(&ipu->lock, flags); |
1417 | list_for_each_safe(list, tmp, &ichan->queue) | 1412 | ipu_ic_disable_task(ipu, chan->chan_id); |
1418 | list_del_init(list); | ||
1419 | 1413 | ||
1420 | ichan->sg[0] = NULL; | 1414 | /* Return all descriptors into "prepared" state */ |
1421 | ichan->sg[1] = NULL; | 1415 | list_for_each_safe(list, tmp, &ichan->queue) |
1416 | list_del_init(list); | ||
1422 | 1417 | ||
1423 | spin_unlock_irqrestore(&ipu->lock, flags); | 1418 | ichan->sg[0] = NULL; |
1419 | ichan->sg[1] = NULL; | ||
1424 | 1420 | ||
1425 | ichan->status = IPU_CHANNEL_INITIALIZED; | 1421 | spin_unlock_irqrestore(&ipu->lock, flags); |
1426 | break; | ||
1427 | case DMA_TERMINATE_ALL: | ||
1428 | ipu_disable_channel(idmac, ichan, | ||
1429 | ichan->status >= IPU_CHANNEL_ENABLED); | ||
1430 | 1422 | ||
1431 | tasklet_disable(&ipu->tasklet); | 1423 | ichan->status = IPU_CHANNEL_INITIALIZED; |
1432 | 1424 | ||
1433 | /* ichan->queue is modified in ISR, have to spinlock */ | 1425 | mutex_unlock(&ichan->chan_mutex); |
1434 | spin_lock_irqsave(&ichan->lock, flags); | ||
1435 | list_splice_init(&ichan->queue, &ichan->free_list); | ||
1436 | 1426 | ||
1437 | if (ichan->desc) | 1427 | return 0; |
1438 | for (i = 0; i < ichan->n_tx_desc; i++) { | 1428 | } |
1439 | struct idmac_tx_desc *desc = ichan->desc + i; | ||
1440 | if (list_empty(&desc->list)) | ||
1441 | /* Descriptor was prepared, but not submitted */ | ||
1442 | list_add(&desc->list, &ichan->free_list); | ||
1443 | 1429 | ||
1444 | async_tx_clear_ack(&desc->txd); | 1430 | static int __idmac_terminate_all(struct dma_chan *chan) |
1445 | } | 1431 | { |
1432 | struct idmac_channel *ichan = to_idmac_chan(chan); | ||
1433 | struct idmac *idmac = to_idmac(chan->device); | ||
1434 | struct ipu *ipu = to_ipu(idmac); | ||
1435 | unsigned long flags; | ||
1436 | int i; | ||
1446 | 1437 | ||
1447 | ichan->sg[0] = NULL; | 1438 | ipu_disable_channel(idmac, ichan, |
1448 | ichan->sg[1] = NULL; | 1439 | ichan->status >= IPU_CHANNEL_ENABLED); |
1449 | spin_unlock_irqrestore(&ichan->lock, flags); | ||
1450 | 1440 | ||
1451 | tasklet_enable(&ipu->tasklet); | 1441 | tasklet_disable(&ipu->tasklet); |
1452 | 1442 | ||
1453 | ichan->status = IPU_CHANNEL_INITIALIZED; | 1443 | /* ichan->queue is modified in ISR, have to spinlock */ |
1454 | break; | 1444 | spin_lock_irqsave(&ichan->lock, flags); |
1455 | default: | 1445 | list_splice_init(&ichan->queue, &ichan->free_list); |
1456 | return -ENOSYS; | 1446 | |
1457 | } | 1447 | if (ichan->desc) |
1448 | for (i = 0; i < ichan->n_tx_desc; i++) { | ||
1449 | struct idmac_tx_desc *desc = ichan->desc + i; | ||
1450 | if (list_empty(&desc->list)) | ||
1451 | /* Descriptor was prepared, but not submitted */ | ||
1452 | list_add(&desc->list, &ichan->free_list); | ||
1453 | |||
1454 | async_tx_clear_ack(&desc->txd); | ||
1455 | } | ||
1456 | |||
1457 | ichan->sg[0] = NULL; | ||
1458 | ichan->sg[1] = NULL; | ||
1459 | spin_unlock_irqrestore(&ichan->lock, flags); | ||
1460 | |||
1461 | tasklet_enable(&ipu->tasklet); | ||
1462 | |||
1463 | ichan->status = IPU_CHANNEL_INITIALIZED; | ||
1458 | 1464 | ||
1459 | return 0; | 1465 | return 0; |
1460 | } | 1466 | } |
1461 | 1467 | ||
1462 | static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 1468 | static int idmac_terminate_all(struct dma_chan *chan) |
1463 | unsigned long arg) | ||
1464 | { | 1469 | { |
1465 | struct idmac_channel *ichan = to_idmac_chan(chan); | 1470 | struct idmac_channel *ichan = to_idmac_chan(chan); |
1466 | int ret; | 1471 | int ret; |
1467 | 1472 | ||
1468 | mutex_lock(&ichan->chan_mutex); | 1473 | mutex_lock(&ichan->chan_mutex); |
1469 | 1474 | ||
1470 | ret = __idmac_control(chan, cmd, arg); | 1475 | ret = __idmac_terminate_all(chan); |
1471 | 1476 | ||
1472 | mutex_unlock(&ichan->chan_mutex); | 1477 | mutex_unlock(&ichan->chan_mutex); |
1473 | 1478 | ||
@@ -1568,7 +1573,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan) | |||
1568 | 1573 | ||
1569 | mutex_lock(&ichan->chan_mutex); | 1574 | mutex_lock(&ichan->chan_mutex); |
1570 | 1575 | ||
1571 | __idmac_control(chan, DMA_TERMINATE_ALL, 0); | 1576 | __idmac_terminate_all(chan); |
1572 | 1577 | ||
1573 | if (ichan->status > IPU_CHANNEL_FREE) { | 1578 | if (ichan->status > IPU_CHANNEL_FREE) { |
1574 | #ifdef DEBUG | 1579 | #ifdef DEBUG |
@@ -1622,7 +1627,8 @@ static int __init ipu_idmac_init(struct ipu *ipu) | |||
1622 | 1627 | ||
1623 | /* Compulsory for DMA_SLAVE fields */ | 1628 | /* Compulsory for DMA_SLAVE fields */ |
1624 | dma->device_prep_slave_sg = idmac_prep_slave_sg; | 1629 | dma->device_prep_slave_sg = idmac_prep_slave_sg; |
1625 | dma->device_control = idmac_control; | 1630 | dma->device_pause = idmac_pause; |
1631 | dma->device_terminate_all = idmac_terminate_all; | ||
1626 | 1632 | ||
1627 | INIT_LIST_HEAD(&dma->channels); | 1633 | INIT_LIST_HEAD(&dma->channels); |
1628 | for (i = 0; i < IPU_CHANNELS_NUM; i++) { | 1634 | for (i = 0; i < IPU_CHANNELS_NUM; i++) { |
@@ -1655,7 +1661,7 @@ static void ipu_idmac_exit(struct ipu *ipu) | |||
1655 | for (i = 0; i < IPU_CHANNELS_NUM; i++) { | 1661 | for (i = 0; i < IPU_CHANNELS_NUM; i++) { |
1656 | struct idmac_channel *ichan = ipu->channel + i; | 1662 | struct idmac_channel *ichan = ipu->channel + i; |
1657 | 1663 | ||
1658 | idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0); | 1664 | idmac_terminate_all(&ichan->dma_chan); |
1659 | } | 1665 | } |
1660 | 1666 | ||
1661 | dma_async_device_unregister(&idmac->dma); | 1667 | dma_async_device_unregister(&idmac->dma); |
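The ipu_idmac conversion also preserves the driver's locked-wrapper idiom: the double-underscore helper assumes the channel mutex is already held (so idmac_free_chan_resources() can call it under its own locking), while the exported callback takes the mutex itself. In miniature, with illustrative foo_* names:

	#include <linux/mutex.h>

	struct foo_chan {
		struct mutex chan_mutex;
		/* ... driver state ... */
	};

	/* Caller must hold chan_mutex, mirroring __idmac_terminate_all(). */
	static int __foo_terminate_all(struct foo_chan *c)
	{
		/* halt hardware, splice queued descriptors back to the free list */
		return 0;
	}

	static int foo_terminate_all(struct foo_chan *c)
	{
		int ret;

		mutex_lock(&c->chan_mutex);
		ret = __foo_terminate_all(c);
		mutex_unlock(&c->chan_mutex);

		return ret;
	}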
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index a1de14ab2c51..6f7f43529ccb 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c | |||
@@ -441,7 +441,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy( | |||
441 | num = 0; | 441 | num = 0; |
442 | 442 | ||
443 | if (!c->ccfg) { | 443 | if (!c->ccfg) { |
444 | /* default is memtomem, without calling device_control */ | 444 | /* default is memtomem, without calling device_config */ |
445 | c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN; | 445 | c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN; |
446 | c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */ | 446 | c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */ |
447 | c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */ | 447 | c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */ |
@@ -523,112 +523,126 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg( | |||
523 | return vchan_tx_prep(&c->vc, &ds->vd, flags); | 523 | return vchan_tx_prep(&c->vc, &ds->vd, flags); |
524 | } | 524 | } |
525 | 525 | ||
526 | static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 526 | static int k3_dma_config(struct dma_chan *chan, |
527 | unsigned long arg) | 527 | struct dma_slave_config *cfg) |
528 | { | ||
529 | struct k3_dma_chan *c = to_k3_chan(chan); | ||
530 | u32 maxburst = 0, val = 0; | ||
531 | enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; | ||
532 | |||
533 | if (cfg == NULL) | ||
534 | return -EINVAL; | ||
535 | c->dir = cfg->direction; | ||
536 | if (c->dir == DMA_DEV_TO_MEM) { | ||
537 | c->ccfg = CX_CFG_DSTINCR; | ||
538 | c->dev_addr = cfg->src_addr; | ||
539 | maxburst = cfg->src_maxburst; | ||
540 | width = cfg->src_addr_width; | ||
541 | } else if (c->dir == DMA_MEM_TO_DEV) { | ||
542 | c->ccfg = CX_CFG_SRCINCR; | ||
543 | c->dev_addr = cfg->dst_addr; | ||
544 | maxburst = cfg->dst_maxburst; | ||
545 | width = cfg->dst_addr_width; | ||
546 | } | ||
547 | switch (width) { | ||
548 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
549 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
550 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
551 | case DMA_SLAVE_BUSWIDTH_8_BYTES: | ||
552 | val = __ffs(width); | ||
553 | break; | ||
554 | default: | ||
555 | val = 3; | ||
556 | break; | ||
557 | } | ||
558 | c->ccfg |= (val << 12) | (val << 16); | ||
559 | |||
560 | if ((maxburst == 0) || (maxburst > 16)) | ||
561 | val = 16; | ||
562 | else | ||
563 | val = maxburst - 1; | ||
564 | c->ccfg |= (val << 20) | (val << 24); | ||
565 | c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN; | ||
566 | |||
567 | /* specific request line */ | ||
568 | c->ccfg |= c->vc.chan.chan_id << 4; | ||
569 | |||
570 | return 0; | ||
571 | } | ||
572 | |||
573 | static int k3_dma_terminate_all(struct dma_chan *chan) | ||
528 | { | 574 | { |
529 | struct k3_dma_chan *c = to_k3_chan(chan); | 575 | struct k3_dma_chan *c = to_k3_chan(chan); |
530 | struct k3_dma_dev *d = to_k3_dma(chan->device); | 576 | struct k3_dma_dev *d = to_k3_dma(chan->device); |
531 | struct dma_slave_config *cfg = (void *)arg; | ||
532 | struct k3_dma_phy *p = c->phy; | 577 | struct k3_dma_phy *p = c->phy; |
533 | unsigned long flags; | 578 | unsigned long flags; |
534 | u32 maxburst = 0, val = 0; | ||
535 | enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; | ||
536 | LIST_HEAD(head); | 579 | LIST_HEAD(head); |
537 | 580 | ||
538 | switch (cmd) { | 581 | dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); |
539 | case DMA_SLAVE_CONFIG: | ||
540 | if (cfg == NULL) | ||
541 | return -EINVAL; | ||
542 | c->dir = cfg->direction; | ||
543 | if (c->dir == DMA_DEV_TO_MEM) { | ||
544 | c->ccfg = CX_CFG_DSTINCR; | ||
545 | c->dev_addr = cfg->src_addr; | ||
546 | maxburst = cfg->src_maxburst; | ||
547 | width = cfg->src_addr_width; | ||
548 | } else if (c->dir == DMA_MEM_TO_DEV) { | ||
549 | c->ccfg = CX_CFG_SRCINCR; | ||
550 | c->dev_addr = cfg->dst_addr; | ||
551 | maxburst = cfg->dst_maxburst; | ||
552 | width = cfg->dst_addr_width; | ||
553 | } | ||
554 | switch (width) { | ||
555 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
556 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
557 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
558 | case DMA_SLAVE_BUSWIDTH_8_BYTES: | ||
559 | val = __ffs(width); | ||
560 | break; | ||
561 | default: | ||
562 | val = 3; | ||
563 | break; | ||
564 | } | ||
565 | c->ccfg |= (val << 12) | (val << 16); | ||
566 | 582 | ||
567 | if ((maxburst == 0) || (maxburst > 16)) | 583 | /* Prevent this channel being scheduled */ |
568 | val = 16; | 584 | spin_lock(&d->lock); |
569 | else | 585 | list_del_init(&c->node); |
570 | val = maxburst - 1; | 586 | spin_unlock(&d->lock); |
571 | c->ccfg |= (val << 20) | (val << 24); | ||
572 | c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN; | ||
573 | 587 | ||
574 | /* specific request line */ | 588 | /* Clear the tx descriptor lists */ |
575 | c->ccfg |= c->vc.chan.chan_id << 4; | 589 | spin_lock_irqsave(&c->vc.lock, flags); |
576 | break; | 590 | vchan_get_all_descriptors(&c->vc, &head); |
591 | if (p) { | ||
592 | /* vchan is assigned to a pchan - stop the channel */ | ||
593 | k3_dma_terminate_chan(p, d); | ||
594 | c->phy = NULL; | ||
595 | p->vchan = NULL; | ||
596 | p->ds_run = p->ds_done = NULL; | ||
597 | } | ||
598 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
599 | vchan_dma_desc_free_list(&c->vc, &head); | ||
577 | 600 | ||
578 | case DMA_TERMINATE_ALL: | 601 | return 0; |
579 | dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); | 602 | } |
580 | 603 | ||
581 | /* Prevent this channel being scheduled */ | 604 | static int k3_dma_transfer_pause(struct dma_chan *chan) |
582 | spin_lock(&d->lock); | 605 | { |
583 | list_del_init(&c->node); | 606 | struct k3_dma_chan *c = to_k3_chan(chan); |
584 | spin_unlock(&d->lock); | 607 | struct k3_dma_dev *d = to_k3_dma(chan->device); |
608 | struct k3_dma_phy *p = c->phy; | ||
585 | 609 | ||
586 | /* Clear the tx descriptor lists */ | 610 | dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); |
587 | spin_lock_irqsave(&c->vc.lock, flags); | 611 | if (c->status == DMA_IN_PROGRESS) { |
588 | vchan_get_all_descriptors(&c->vc, &head); | 612 | c->status = DMA_PAUSED; |
589 | if (p) { | 613 | if (p) { |
590 | /* vchan is assigned to a pchan - stop the channel */ | 614 | k3_dma_pause_dma(p, false); |
591 | k3_dma_terminate_chan(p, d); | 615 | } else { |
592 | c->phy = NULL; | 616 | spin_lock(&d->lock); |
593 | p->vchan = NULL; | 617 | list_del_init(&c->node); |
594 | p->ds_run = p->ds_done = NULL; | 618 | spin_unlock(&d->lock); |
595 | } | 619 | } |
596 | spin_unlock_irqrestore(&c->vc.lock, flags); | 620 | } |
597 | vchan_dma_desc_free_list(&c->vc, &head); | ||
598 | break; | ||
599 | 621 | ||
600 | case DMA_PAUSE: | 622 | return 0; |
601 | dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); | 623 | } |
602 | if (c->status == DMA_IN_PROGRESS) { | ||
603 | c->status = DMA_PAUSED; | ||
604 | if (p) { | ||
605 | k3_dma_pause_dma(p, false); | ||
606 | } else { | ||
607 | spin_lock(&d->lock); | ||
608 | list_del_init(&c->node); | ||
609 | spin_unlock(&d->lock); | ||
610 | } | ||
611 | } | ||
612 | break; | ||
613 | 624 | ||
614 | case DMA_RESUME: | 625 | static int k3_dma_transfer_resume(struct dma_chan *chan) |
615 | dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); | 626 | { |
616 | spin_lock_irqsave(&c->vc.lock, flags); | 627 | struct k3_dma_chan *c = to_k3_chan(chan); |
617 | if (c->status == DMA_PAUSED) { | 628 | struct k3_dma_dev *d = to_k3_dma(chan->device); |
618 | c->status = DMA_IN_PROGRESS; | 629 | struct k3_dma_phy *p = c->phy; |
619 | if (p) { | 630 | unsigned long flags; |
620 | k3_dma_pause_dma(p, true); | 631 | |
621 | } else if (!list_empty(&c->vc.desc_issued)) { | 632 | dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); |
622 | spin_lock(&d->lock); | 633 | spin_lock_irqsave(&c->vc.lock, flags); |
623 | list_add_tail(&c->node, &d->chan_pending); | 634 | if (c->status == DMA_PAUSED) { |
624 | spin_unlock(&d->lock); | 635 | c->status = DMA_IN_PROGRESS; |
625 | } | 636 | if (p) { |
637 | k3_dma_pause_dma(p, true); | ||
638 | } else if (!list_empty(&c->vc.desc_issued)) { | ||
639 | spin_lock(&d->lock); | ||
640 | list_add_tail(&c->node, &d->chan_pending); | ||
641 | spin_unlock(&d->lock); | ||
626 | } | 642 | } |
627 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
628 | break; | ||
629 | default: | ||
630 | return -ENXIO; | ||
631 | } | 643 | } |
644 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
645 | |||
632 | return 0; | 646 | return 0; |
633 | } | 647 | } |
634 | 648 | ||
@@ -720,7 +734,10 @@ static int k3_dma_probe(struct platform_device *op) | |||
720 | d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy; | 734 | d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy; |
721 | d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg; | 735 | d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg; |
722 | d->slave.device_issue_pending = k3_dma_issue_pending; | 736 | d->slave.device_issue_pending = k3_dma_issue_pending; |
723 | d->slave.device_control = k3_dma_control; | 737 | d->slave.device_config = k3_dma_config; |
738 | d->slave.device_pause = k3_dma_transfer_pause; | ||
739 | d->slave.device_resume = k3_dma_transfer_resume; | ||
740 | d->slave.device_terminate_all = k3_dma_terminate_all; | ||
724 | d->slave.copy_align = DMA_ALIGN; | 741 | d->slave.copy_align = DMA_ALIGN; |
725 | 742 | ||
726 | /* init virtual channel */ | 743 | /* init virtual channel */ |
@@ -787,7 +804,7 @@ static int k3_dma_remove(struct platform_device *op) | |||
787 | } | 804 | } |
788 | 805 | ||
789 | #ifdef CONFIG_PM_SLEEP | 806 | #ifdef CONFIG_PM_SLEEP |
790 | static int k3_dma_suspend(struct device *dev) | 807 | static int k3_dma_suspend_dev(struct device *dev) |
791 | { | 808 | { |
792 | struct k3_dma_dev *d = dev_get_drvdata(dev); | 809 | struct k3_dma_dev *d = dev_get_drvdata(dev); |
793 | u32 stat = 0; | 810 | u32 stat = 0; |
@@ -803,7 +820,7 @@ static int k3_dma_suspend(struct device *dev) | |||
803 | return 0; | 820 | return 0; |
804 | } | 821 | } |
805 | 822 | ||
806 | static int k3_dma_resume(struct device *dev) | 823 | static int k3_dma_resume_dev(struct device *dev) |
807 | { | 824 | { |
808 | struct k3_dma_dev *d = dev_get_drvdata(dev); | 825 | struct k3_dma_dev *d = dev_get_drvdata(dev); |
809 | int ret = 0; | 826 | int ret = 0; |
@@ -818,7 +835,7 @@ static int k3_dma_resume(struct device *dev) | |||
818 | } | 835 | } |
819 | #endif | 836 | #endif |
820 | 837 | ||
821 | static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume); | 838 | static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev); |
822 | 839 | ||
823 | static struct platform_driver k3_pdma_driver = { | 840 | static struct platform_driver k3_pdma_driver = { |
824 | .driver = { | 841 | .driver = { |
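The k3dma hunk shows the full shape of the conversion: one device_control() switch statement becomes four dedicated, typed callbacks. Stripped to a skeleton (the foo_* names below are illustrative, not from any driver in this series), the pattern is:

	#include <linux/dmaengine.h>

	static int foo_config(struct dma_chan *chan, struct dma_slave_config *cfg)
	{
		/* cfg arrives typed; no more casting an unsigned long argument */
		return 0;
	}

	static int foo_pause(struct dma_chan *chan)
	{
		return 0;
	}

	static int foo_resume(struct dma_chan *chan)
	{
		return 0;
	}

	static int foo_terminate_all(struct dma_chan *chan)
	{
		return 0;
	}

	static void foo_register_ops(struct dma_device *dma)
	{
		/* was: dma->device_control = foo_control; */
		dma->device_config = foo_config;
		dma->device_pause = foo_pause;
		dma->device_resume = foo_resume;
		dma->device_terminate_all = foo_terminate_all;
	}

A driver that cannot pause or resume simply leaves those hooks NULL, instead of returning -ENXIO from a catch-all switch default.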
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index 8b8952f35e6c..8926f271904e 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c | |||
@@ -683,68 +683,70 @@ fail: | |||
683 | return NULL; | 683 | return NULL; |
684 | } | 684 | } |
685 | 685 | ||
686 | static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, | 686 | static int mmp_pdma_config(struct dma_chan *dchan, |
687 | unsigned long arg) | 687 | struct dma_slave_config *cfg) |
688 | { | 688 | { |
689 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | 689 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); |
690 | struct dma_slave_config *cfg = (void *)arg; | ||
691 | unsigned long flags; | ||
692 | u32 maxburst = 0, addr = 0; | 690 | u32 maxburst = 0, addr = 0; |
693 | enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; | 691 | enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; |
694 | 692 | ||
695 | if (!dchan) | 693 | if (!dchan) |
696 | return -EINVAL; | 694 | return -EINVAL; |
697 | 695 | ||
698 | switch (cmd) { | 696 | if (cfg->direction == DMA_DEV_TO_MEM) { |
699 | case DMA_TERMINATE_ALL: | 697 | chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; |
700 | disable_chan(chan->phy); | 698 | maxburst = cfg->src_maxburst; |
701 | mmp_pdma_free_phy(chan); | 699 | width = cfg->src_addr_width; |
702 | spin_lock_irqsave(&chan->desc_lock, flags); | 700 | addr = cfg->src_addr; |
703 | mmp_pdma_free_desc_list(chan, &chan->chain_pending); | 701 | } else if (cfg->direction == DMA_MEM_TO_DEV) { |
704 | mmp_pdma_free_desc_list(chan, &chan->chain_running); | 702 | chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; |
705 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 703 | maxburst = cfg->dst_maxburst; |
706 | chan->idle = true; | 704 | width = cfg->dst_addr_width; |
707 | break; | 705 | addr = cfg->dst_addr; |
708 | case DMA_SLAVE_CONFIG: | ||
709 | if (cfg->direction == DMA_DEV_TO_MEM) { | ||
710 | chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; | ||
711 | maxburst = cfg->src_maxburst; | ||
712 | width = cfg->src_addr_width; | ||
713 | addr = cfg->src_addr; | ||
714 | } else if (cfg->direction == DMA_MEM_TO_DEV) { | ||
715 | chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; | ||
716 | maxburst = cfg->dst_maxburst; | ||
717 | width = cfg->dst_addr_width; | ||
718 | addr = cfg->dst_addr; | ||
719 | } | ||
720 | |||
721 | if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) | ||
722 | chan->dcmd |= DCMD_WIDTH1; | ||
723 | else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) | ||
724 | chan->dcmd |= DCMD_WIDTH2; | ||
725 | else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
726 | chan->dcmd |= DCMD_WIDTH4; | ||
727 | |||
728 | if (maxburst == 8) | ||
729 | chan->dcmd |= DCMD_BURST8; | ||
730 | else if (maxburst == 16) | ||
731 | chan->dcmd |= DCMD_BURST16; | ||
732 | else if (maxburst == 32) | ||
733 | chan->dcmd |= DCMD_BURST32; | ||
734 | |||
735 | chan->dir = cfg->direction; | ||
736 | chan->dev_addr = addr; | ||
737 | /* FIXME: drivers should be ported over to use the filter | ||
738 | * function. Once that's done, the following two lines can | ||
739 | * be removed. | ||
740 | */ | ||
741 | if (cfg->slave_id) | ||
742 | chan->drcmr = cfg->slave_id; | ||
743 | break; | ||
744 | default: | ||
745 | return -ENOSYS; | ||
746 | } | 706 | } |
747 | 707 | ||
708 | if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) | ||
709 | chan->dcmd |= DCMD_WIDTH1; | ||
710 | else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) | ||
711 | chan->dcmd |= DCMD_WIDTH2; | ||
712 | else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
713 | chan->dcmd |= DCMD_WIDTH4; | ||
714 | |||
715 | if (maxburst == 8) | ||
716 | chan->dcmd |= DCMD_BURST8; | ||
717 | else if (maxburst == 16) | ||
718 | chan->dcmd |= DCMD_BURST16; | ||
719 | else if (maxburst == 32) | ||
720 | chan->dcmd |= DCMD_BURST32; | ||
721 | |||
722 | chan->dir = cfg->direction; | ||
723 | chan->dev_addr = addr; | ||
724 | /* FIXME: drivers should be ported over to use the filter | ||
725 | * function. Once that's done, the following two lines can | ||
726 | * be removed. | ||
727 | */ | ||
728 | if (cfg->slave_id) | ||
729 | chan->drcmr = cfg->slave_id; | ||
730 | |||
731 | return 0; | ||
732 | } | ||
733 | |||
734 | static int mmp_pdma_terminate_all(struct dma_chan *dchan) | ||
735 | { | ||
736 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
737 | unsigned long flags; | ||
738 | |||
739 | if (!dchan) | ||
740 | return -EINVAL; | ||
741 | |||
742 | disable_chan(chan->phy); | ||
743 | mmp_pdma_free_phy(chan); | ||
744 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
745 | mmp_pdma_free_desc_list(chan, &chan->chain_pending); | ||
746 | mmp_pdma_free_desc_list(chan, &chan->chain_running); | ||
747 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
748 | chan->idle = true; | ||
749 | |||
748 | return 0; | 750 | return 0; |
749 | } | 751 | } |
750 | 752 | ||
@@ -1061,7 +1063,8 @@ static int mmp_pdma_probe(struct platform_device *op) | |||
1061 | pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg; | 1063 | pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg; |
1062 | pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic; | 1064 | pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic; |
1063 | pdev->device.device_issue_pending = mmp_pdma_issue_pending; | 1065 | pdev->device.device_issue_pending = mmp_pdma_issue_pending; |
1064 | pdev->device.device_control = mmp_pdma_control; | 1066 | pdev->device.device_config = mmp_pdma_config; |
1067 | pdev->device.device_terminate_all = mmp_pdma_terminate_all; | ||
1065 | pdev->device.copy_align = PDMA_ALIGNMENT; | 1068 | pdev->device.copy_align = PDMA_ALIGNMENT; |
1066 | 1069 | ||
1067 | if (pdev->dev->coherent_dma_mask) | 1070 | if (pdev->dev->coherent_dma_mask) |
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index bfb46957c3dc..70c2fa9963cd 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/dmaengine.h> | 19 | #include <linux/dmaengine.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/device.h> | 21 | #include <linux/device.h> |
22 | #include <mach/regs-icu.h> | ||
23 | #include <linux/platform_data/dma-mmp_tdma.h> | 22 | #include <linux/platform_data/dma-mmp_tdma.h> |
24 | #include <linux/of_device.h> | 23 | #include <linux/of_device.h> |
25 | #include <linux/of_dma.h> | 24 | #include <linux/of_dma.h> |
@@ -164,33 +163,46 @@ static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac) | |||
164 | tdmac->status = DMA_IN_PROGRESS; | 163 | tdmac->status = DMA_IN_PROGRESS; |
165 | } | 164 | } |
166 | 165 | ||
167 | static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac) | 166 | static int mmp_tdma_disable_chan(struct dma_chan *chan) |
168 | { | 167 | { |
168 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | ||
169 | |||
169 | writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, | 170 | writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, |
170 | tdmac->reg_base + TDCR); | 171 | tdmac->reg_base + TDCR); |
171 | 172 | ||
172 | tdmac->status = DMA_COMPLETE; | 173 | tdmac->status = DMA_COMPLETE; |
174 | |||
175 | return 0; | ||
173 | } | 176 | } |
174 | 177 | ||
175 | static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac) | 178 | static int mmp_tdma_resume_chan(struct dma_chan *chan) |
176 | { | 179 | { |
180 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | ||
181 | |||
177 | writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN, | 182 | writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN, |
178 | tdmac->reg_base + TDCR); | 183 | tdmac->reg_base + TDCR); |
179 | tdmac->status = DMA_IN_PROGRESS; | 184 | tdmac->status = DMA_IN_PROGRESS; |
185 | |||
186 | return 0; | ||
180 | } | 187 | } |
181 | 188 | ||
182 | static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac) | 189 | static int mmp_tdma_pause_chan(struct dma_chan *chan) |
183 | { | 190 | { |
191 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | ||
192 | |||
184 | writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, | 193 | writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, |
185 | tdmac->reg_base + TDCR); | 194 | tdmac->reg_base + TDCR); |
186 | tdmac->status = DMA_PAUSED; | 195 | tdmac->status = DMA_PAUSED; |
196 | |||
197 | return 0; | ||
187 | } | 198 | } |
188 | 199 | ||
189 | static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac) | 200 | static int mmp_tdma_config_chan(struct dma_chan *chan) |
190 | { | 201 | { |
202 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | ||
191 | unsigned int tdcr = 0; | 203 | unsigned int tdcr = 0; |
192 | 204 | ||
193 | mmp_tdma_disable_chan(tdmac); | 205 | mmp_tdma_disable_chan(chan); |
194 | 206 | ||
195 | if (tdmac->dir == DMA_MEM_TO_DEV) | 207 | if (tdmac->dir == DMA_MEM_TO_DEV) |
196 | tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC; | 208 | tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC; |
@@ -452,42 +464,34 @@ err_out: | |||
452 | return NULL; | 464 | return NULL; |
453 | } | 465 | } |
454 | 466 | ||
455 | static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 467 | static int mmp_tdma_terminate_all(struct dma_chan *chan) |
456 | unsigned long arg) | ||
457 | { | 468 | { |
458 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | 469 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); |
459 | struct dma_slave_config *dmaengine_cfg = (void *)arg; | 470 | |
460 | int ret = 0; | 471 | mmp_tdma_disable_chan(chan); |
461 | 472 | /* disable interrupt */ | |
462 | switch (cmd) { | 473 | mmp_tdma_enable_irq(tdmac, false); |
463 | case DMA_TERMINATE_ALL: | 474 | |
464 | mmp_tdma_disable_chan(tdmac); | 475 | return 0; |
465 | /* disable interrupt */ | 476 | } |
466 | mmp_tdma_enable_irq(tdmac, false); | 477 | |
467 | break; | 478 | static int mmp_tdma_config(struct dma_chan *chan, |
468 | case DMA_PAUSE: | 479 | struct dma_slave_config *dmaengine_cfg) |
469 | mmp_tdma_pause_chan(tdmac); | 480 | { |
470 | break; | 481 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); |
471 | case DMA_RESUME: | 482 | |
472 | mmp_tdma_resume_chan(tdmac); | 483 | if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { |
473 | break; | 484 | tdmac->dev_addr = dmaengine_cfg->src_addr; |
474 | case DMA_SLAVE_CONFIG: | 485 | tdmac->burst_sz = dmaengine_cfg->src_maxburst; |
475 | if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { | 486 | tdmac->buswidth = dmaengine_cfg->src_addr_width; |
476 | tdmac->dev_addr = dmaengine_cfg->src_addr; | 487 | } else { |
477 | tdmac->burst_sz = dmaengine_cfg->src_maxburst; | 488 | tdmac->dev_addr = dmaengine_cfg->dst_addr; |
478 | tdmac->buswidth = dmaengine_cfg->src_addr_width; | 489 | tdmac->burst_sz = dmaengine_cfg->dst_maxburst; |
479 | } else { | 490 | tdmac->buswidth = dmaengine_cfg->dst_addr_width; |
480 | tdmac->dev_addr = dmaengine_cfg->dst_addr; | ||
481 | tdmac->burst_sz = dmaengine_cfg->dst_maxburst; | ||
482 | tdmac->buswidth = dmaengine_cfg->dst_addr_width; | ||
483 | } | ||
484 | tdmac->dir = dmaengine_cfg->direction; | ||
485 | return mmp_tdma_config_chan(tdmac); | ||
486 | default: | ||
487 | ret = -ENOSYS; | ||
488 | } | 491 | } |
492 | tdmac->dir = dmaengine_cfg->direction; | ||
489 | 493 | ||
490 | return ret; | 494 | return mmp_tdma_config_chan(chan); |
491 | } | 495 | } |
492 | 496 | ||
493 | static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan, | 497 | static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan, |
@@ -668,7 +672,10 @@ static int mmp_tdma_probe(struct platform_device *pdev) | |||
668 | tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic; | 672 | tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic; |
669 | tdev->device.device_tx_status = mmp_tdma_tx_status; | 673 | tdev->device.device_tx_status = mmp_tdma_tx_status; |
670 | tdev->device.device_issue_pending = mmp_tdma_issue_pending; | 674 | tdev->device.device_issue_pending = mmp_tdma_issue_pending; |
671 | tdev->device.device_control = mmp_tdma_control; | 675 | tdev->device.device_config = mmp_tdma_config; |
676 | tdev->device.device_pause = mmp_tdma_pause_chan; | ||
677 | tdev->device.device_resume = mmp_tdma_resume_chan; | ||
678 | tdev->device.device_terminate_all = mmp_tdma_terminate_all; | ||
672 | tdev->device.copy_align = TDMA_ALIGNMENT; | 679 | tdev->device.copy_align = TDMA_ALIGNMENT; |
673 | 680 | ||
674 | dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); | 681 | dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); |
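Note how mmp_tdma's helpers change signature from the driver-private mmp_tdma_chan to a bare struct dma_chan * so they can be wired straight into the new hooks; the usual container_of() accessor recovers the private state. A minimal sketch of that idiom (the foo_* names are assumptions):

	#include <linux/dmaengine.h>

	struct foo_chan {
		struct dma_chan chan;	/* embeds the generic channel */
		u32 dcmd;		/* example of driver-private state */
	};

	static struct foo_chan *to_foo_chan(struct dma_chan *chan)
	{
		return container_of(chan, struct foo_chan, chan);
	}

	static int foo_pause_chan(struct dma_chan *chan)
	{
		struct foo_chan *fc = to_foo_chan(chan);

		fc->dcmd = 0;	/* e.g. clear a shadow of the channel-enable bit */
		return 0;
	}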
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c index 53032bac06e0..15cab7d79525 100644 --- a/drivers/dma/moxart-dma.c +++ b/drivers/dma/moxart-dma.c | |||
@@ -263,28 +263,6 @@ static int moxart_slave_config(struct dma_chan *chan, | |||
263 | return 0; | 263 | return 0; |
264 | } | 264 | } |
265 | 265 | ||
266 | static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
267 | unsigned long arg) | ||
268 | { | ||
269 | int ret = 0; | ||
270 | |||
271 | switch (cmd) { | ||
272 | case DMA_PAUSE: | ||
273 | case DMA_RESUME: | ||
274 | return -EINVAL; | ||
275 | case DMA_TERMINATE_ALL: | ||
276 | moxart_terminate_all(chan); | ||
277 | break; | ||
278 | case DMA_SLAVE_CONFIG: | ||
279 | ret = moxart_slave_config(chan, (struct dma_slave_config *)arg); | ||
280 | break; | ||
281 | default: | ||
282 | ret = -ENOSYS; | ||
283 | } | ||
284 | |||
285 | return ret; | ||
286 | } | ||
287 | |||
288 | static struct dma_async_tx_descriptor *moxart_prep_slave_sg( | 266 | static struct dma_async_tx_descriptor *moxart_prep_slave_sg( |
289 | struct dma_chan *chan, struct scatterlist *sgl, | 267 | struct dma_chan *chan, struct scatterlist *sgl, |
290 | unsigned int sg_len, enum dma_transfer_direction dir, | 268 | unsigned int sg_len, enum dma_transfer_direction dir, |
@@ -531,7 +509,8 @@ static void moxart_dma_init(struct dma_device *dma, struct device *dev) | |||
531 | dma->device_free_chan_resources = moxart_free_chan_resources; | 509 | dma->device_free_chan_resources = moxart_free_chan_resources; |
532 | dma->device_issue_pending = moxart_issue_pending; | 510 | dma->device_issue_pending = moxart_issue_pending; |
533 | dma->device_tx_status = moxart_tx_status; | 511 | dma->device_tx_status = moxart_tx_status; |
534 | dma->device_control = moxart_control; | 512 | dma->device_config = moxart_slave_config; |
513 | dma->device_terminate_all = moxart_terminate_all; | ||
535 | dma->dev = dev; | 514 | dma->dev = dev; |
536 | 515 | ||
537 | INIT_LIST_HEAD(&dma->channels); | 516 | INIT_LIST_HEAD(&dma->channels); |
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 01bec4023de2..57d2457545f3 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c | |||
@@ -800,79 +800,69 @@ err_prep: | |||
800 | return NULL; | 800 | return NULL; |
801 | } | 801 | } |
802 | 802 | ||
803 | static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 803 | static int mpc_dma_device_config(struct dma_chan *chan, |
804 | unsigned long arg) | 804 | struct dma_slave_config *cfg) |
805 | { | 805 | { |
806 | struct mpc_dma_chan *mchan; | 806 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); |
807 | struct mpc_dma *mdma; | ||
808 | struct dma_slave_config *cfg; | ||
809 | unsigned long flags; | 807 | unsigned long flags; |
810 | 808 | ||
811 | mchan = dma_chan_to_mpc_dma_chan(chan); | 809 | /* |
812 | switch (cmd) { | 810 | * Software constraints: |
813 | case DMA_TERMINATE_ALL: | 811 | * - only transfers between a peripheral device and |
814 | /* Disable channel requests */ | 812 | * memory are supported; |
815 | mdma = dma_chan_to_mpc_dma(chan); | 813 | * - only peripheral devices with 4-byte FIFO access register |
816 | 814 | * are supported; | |
817 | spin_lock_irqsave(&mchan->lock, flags); | 815 | * - minimal transfer chunk is 4 bytes and consequently |
818 | 816 | * source and destination addresses must be 4-byte aligned | |
819 | out_8(&mdma->regs->dmacerq, chan->chan_id); | 817 | * and transfer size must be aligned on (4 * maxburst) |
820 | list_splice_tail_init(&mchan->prepared, &mchan->free); | 818 | * boundary; |
821 | list_splice_tail_init(&mchan->queued, &mchan->free); | 819 | * - during the transfer RAM address is being incremented by |
822 | list_splice_tail_init(&mchan->active, &mchan->free); | 820 | * the size of minimal transfer chunk; |
823 | 821 | * - peripheral port's address is constant during the transfer. | |
824 | spin_unlock_irqrestore(&mchan->lock, flags); | 822 | */ |
825 | 823 | ||
826 | return 0; | 824 | if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || |
825 | cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || | ||
826 | !IS_ALIGNED(cfg->src_addr, 4) || | ||
827 | !IS_ALIGNED(cfg->dst_addr, 4)) { | ||
828 | return -EINVAL; | ||
829 | } | ||
827 | 830 | ||
828 | case DMA_SLAVE_CONFIG: | 831 | spin_lock_irqsave(&mchan->lock, flags); |
829 | /* | ||
830 | * Software constraints: | ||
831 | * - only transfers between a peripheral device and | ||
832 | * memory are supported; | ||
833 | * - only peripheral devices with 4-byte FIFO access register | ||
834 | * are supported; | ||
835 | * - minimal transfer chunk is 4 bytes and consequently | ||
836 | * source and destination addresses must be 4-byte aligned | ||
837 | * and transfer size must be aligned on (4 * maxburst) | ||
838 | * boundary; | ||
839 | * - during the transfer RAM address is being incremented by | ||
840 | * the size of minimal transfer chunk; | ||
841 | * - peripheral port's address is constant during the transfer. | ||
842 | */ | ||
843 | 832 | ||
844 | cfg = (void *)arg; | 833 | mchan->src_per_paddr = cfg->src_addr; |
834 | mchan->src_tcd_nunits = cfg->src_maxburst; | ||
835 | mchan->dst_per_paddr = cfg->dst_addr; | ||
836 | mchan->dst_tcd_nunits = cfg->dst_maxburst; | ||
845 | 837 | ||
846 | if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || | 838 | /* Apply defaults */ |
847 | cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || | 839 | if (mchan->src_tcd_nunits == 0) |
848 | !IS_ALIGNED(cfg->src_addr, 4) || | 840 | mchan->src_tcd_nunits = 1; |
849 | !IS_ALIGNED(cfg->dst_addr, 4)) { | 841 | if (mchan->dst_tcd_nunits == 0) |
850 | return -EINVAL; | 842 | mchan->dst_tcd_nunits = 1; |
851 | } | ||
852 | 843 | ||
853 | spin_lock_irqsave(&mchan->lock, flags); | 844 | spin_unlock_irqrestore(&mchan->lock, flags); |
854 | 845 | ||
855 | mchan->src_per_paddr = cfg->src_addr; | 846 | return 0; |
856 | mchan->src_tcd_nunits = cfg->src_maxburst; | 847 | } |
857 | mchan->dst_per_paddr = cfg->dst_addr; | ||
858 | mchan->dst_tcd_nunits = cfg->dst_maxburst; | ||
859 | 848 | ||
860 | /* Apply defaults */ | 849 | static int mpc_dma_device_terminate_all(struct dma_chan *chan) |
861 | if (mchan->src_tcd_nunits == 0) | 850 | { |
862 | mchan->src_tcd_nunits = 1; | 851 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); |
863 | if (mchan->dst_tcd_nunits == 0) | 852 | struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); |
864 | mchan->dst_tcd_nunits = 1; | 853 | unsigned long flags; |
865 | 854 | ||
866 | spin_unlock_irqrestore(&mchan->lock, flags); | 855 | /* Disable channel requests */ |
856 | spin_lock_irqsave(&mchan->lock, flags); | ||
867 | 857 | ||
868 | return 0; | 858 | out_8(&mdma->regs->dmacerq, chan->chan_id); |
859 | list_splice_tail_init(&mchan->prepared, &mchan->free); | ||
860 | list_splice_tail_init(&mchan->queued, &mchan->free); | ||
861 | list_splice_tail_init(&mchan->active, &mchan->free); | ||
869 | 862 | ||
870 | default: | 863 | spin_unlock_irqrestore(&mchan->lock, flags); |
871 | /* Unknown command */ | ||
872 | break; | ||
873 | } | ||
874 | 864 | ||
875 | return -ENXIO; | 865 | return 0; |
876 | } | 866 | } |
877 | 867 | ||
878 | static int mpc_dma_probe(struct platform_device *op) | 868 | static int mpc_dma_probe(struct platform_device *op) |
@@ -963,7 +953,8 @@ static int mpc_dma_probe(struct platform_device *op) | |||
963 | dma->device_tx_status = mpc_dma_tx_status; | 953 | dma->device_tx_status = mpc_dma_tx_status; |
964 | dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; | 954 | dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; |
965 | dma->device_prep_slave_sg = mpc_dma_prep_slave_sg; | 955 | dma->device_prep_slave_sg = mpc_dma_prep_slave_sg; |
966 | dma->device_control = mpc_dma_device_control; | 956 | dma->device_config = mpc_dma_device_config; |
957 | dma->device_terminate_all = mpc_dma_device_terminate_all; | ||
967 | 958 | ||
968 | INIT_LIST_HEAD(&dma->channels); | 959 | INIT_LIST_HEAD(&dma->channels); |
969 | dma_cap_set(DMA_MEMCPY, dma->cap_mask); | 960 | dma_cap_set(DMA_MEMCPY, dma->cap_mask); |
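The constraints block that moved into mpc_dma_device_config() doubles as client documentation: the driver validates that both address widths are 4 bytes and that both addresses are 4-byte aligned. A hypothetical client configuration that would pass those checks (foo_setup_rx() and its fifo parameter are assumptions) might look like:

	#include <linux/dmaengine.h>

	static int foo_setup_rx(struct dma_chan *chan, dma_addr_t fifo)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_DEV_TO_MEM,
			.src_addr	= fifo,	/* must be 4-byte aligned */
			/* the driver checks both widths, not just the source */
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst	= 4,
		};

		return dmaengine_slave_config(chan, &cfg);
	}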
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index d7ac558c2c1c..b03e8137b918 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -928,14 +928,6 @@ out: | |||
928 | return err; | 928 | return err; |
929 | } | 929 | } |
930 | 930 | ||
931 | /* This driver does not implement any of the optional DMA operations. */ | ||
932 | static int | ||
933 | mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
934 | unsigned long arg) | ||
935 | { | ||
936 | return -ENOSYS; | ||
937 | } | ||
938 | |||
939 | static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan) | 931 | static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan) |
940 | { | 932 | { |
941 | struct dma_chan *chan, *_chan; | 933 | struct dma_chan *chan, *_chan; |
@@ -1008,7 +1000,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1008 | dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; | 1000 | dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; |
1009 | dma_dev->device_tx_status = mv_xor_status; | 1001 | dma_dev->device_tx_status = mv_xor_status; |
1010 | dma_dev->device_issue_pending = mv_xor_issue_pending; | 1002 | dma_dev->device_issue_pending = mv_xor_issue_pending; |
1011 | dma_dev->device_control = mv_xor_control; | ||
1012 | dma_dev->dev = &pdev->dev; | 1003 | dma_dev->dev = &pdev->dev; |
1013 | 1004 | ||
1014 | /* set prep routines based on capability */ | 1005 | /* set prep routines based on capability */ |
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 5ea61201dbf0..829ec686dac3 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
@@ -202,8 +202,9 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) | |||
202 | return container_of(chan, struct mxs_dma_chan, chan); | 202 | return container_of(chan, struct mxs_dma_chan, chan); |
203 | } | 203 | } |
204 | 204 | ||
205 | static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) | 205 | static void mxs_dma_reset_chan(struct dma_chan *chan) |
206 | { | 206 | { |
207 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
207 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 208 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
208 | int chan_id = mxs_chan->chan.chan_id; | 209 | int chan_id = mxs_chan->chan.chan_id; |
209 | 210 | ||
@@ -250,8 +251,9 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) | |||
250 | mxs_chan->status = DMA_COMPLETE; | 251 | mxs_chan->status = DMA_COMPLETE; |
251 | } | 252 | } |
252 | 253 | ||
253 | static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) | 254 | static void mxs_dma_enable_chan(struct dma_chan *chan) |
254 | { | 255 | { |
256 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
255 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 257 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
256 | int chan_id = mxs_chan->chan.chan_id; | 258 | int chan_id = mxs_chan->chan.chan_id; |
257 | 259 | ||
@@ -272,13 +274,16 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) | |||
272 | mxs_chan->reset = false; | 274 | mxs_chan->reset = false; |
273 | } | 275 | } |
274 | 276 | ||
275 | static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) | 277 | static void mxs_dma_disable_chan(struct dma_chan *chan) |
276 | { | 278 | { |
279 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
280 | |||
277 | mxs_chan->status = DMA_COMPLETE; | 281 | mxs_chan->status = DMA_COMPLETE; |
278 | } | 282 | } |
279 | 283 | ||
280 | static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) | 284 | static int mxs_dma_pause_chan(struct dma_chan *chan) |
281 | { | 285 | { |
286 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
282 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 287 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
283 | int chan_id = mxs_chan->chan.chan_id; | 288 | int chan_id = mxs_chan->chan.chan_id; |
284 | 289 | ||
@@ -291,10 +296,12 @@ static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) | |||
291 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); | 296 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); |
292 | 297 | ||
293 | mxs_chan->status = DMA_PAUSED; | 298 | mxs_chan->status = DMA_PAUSED; |
299 | return 0; | ||
294 | } | 300 | } |
295 | 301 | ||
296 | static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan) | 302 | static int mxs_dma_resume_chan(struct dma_chan *chan) |
297 | { | 303 | { |
304 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
298 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 305 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
299 | int chan_id = mxs_chan->chan.chan_id; | 306 | int chan_id = mxs_chan->chan.chan_id; |
300 | 307 | ||
@@ -307,6 +314,7 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan) | |||
307 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR); | 314 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR); |
308 | 315 | ||
309 | mxs_chan->status = DMA_IN_PROGRESS; | 316 | mxs_chan->status = DMA_IN_PROGRESS; |
317 | return 0; | ||
310 | } | 318 | } |
311 | 319 | ||
312 | static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) | 320 | static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) |
@@ -383,7 +391,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) | |||
383 | "%s: error in channel %d\n", __func__, | 391 | "%s: error in channel %d\n", __func__, |
384 | chan); | 392 | chan); |
385 | mxs_chan->status = DMA_ERROR; | 393 | mxs_chan->status = DMA_ERROR; |
386 | mxs_dma_reset_chan(mxs_chan); | 394 | mxs_dma_reset_chan(&mxs_chan->chan); |
387 | } else if (mxs_chan->status != DMA_COMPLETE) { | 395 | } else if (mxs_chan->status != DMA_COMPLETE) { |
388 | if (mxs_chan->flags & MXS_DMA_SG_LOOP) { | 396 | if (mxs_chan->flags & MXS_DMA_SG_LOOP) { |
389 | mxs_chan->status = DMA_IN_PROGRESS; | 397 | mxs_chan->status = DMA_IN_PROGRESS; |
@@ -432,7 +440,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) | |||
432 | if (ret) | 440 | if (ret) |
433 | goto err_clk; | 441 | goto err_clk; |
434 | 442 | ||
435 | mxs_dma_reset_chan(mxs_chan); | 443 | mxs_dma_reset_chan(chan); |
436 | 444 | ||
437 | dma_async_tx_descriptor_init(&mxs_chan->desc, chan); | 445 | dma_async_tx_descriptor_init(&mxs_chan->desc, chan); |
438 | mxs_chan->desc.tx_submit = mxs_dma_tx_submit; | 446 | mxs_chan->desc.tx_submit = mxs_dma_tx_submit; |
@@ -456,7 +464,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan) | |||
456 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | 464 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
457 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 465 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
458 | 466 | ||
459 | mxs_dma_disable_chan(mxs_chan); | 467 | mxs_dma_disable_chan(chan); |
460 | 468 | ||
461 | free_irq(mxs_chan->chan_irq, mxs_dma); | 469 | free_irq(mxs_chan->chan_irq, mxs_dma); |
462 | 470 | ||
@@ -651,28 +659,12 @@ err_out: | |||
651 | return NULL; | 659 | return NULL; |
652 | } | 660 | } |
653 | 661 | ||
654 | static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 662 | static int mxs_dma_terminate_all(struct dma_chan *chan) |
655 | unsigned long arg) | ||
656 | { | 663 | { |
657 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | 664 | mxs_dma_reset_chan(chan); |
658 | int ret = 0; | 665 | mxs_dma_disable_chan(chan); |
659 | |||
660 | switch (cmd) { | ||
661 | case DMA_TERMINATE_ALL: | ||
662 | mxs_dma_reset_chan(mxs_chan); | ||
663 | mxs_dma_disable_chan(mxs_chan); | ||
664 | break; | ||
665 | case DMA_PAUSE: | ||
666 | mxs_dma_pause_chan(mxs_chan); | ||
667 | break; | ||
668 | case DMA_RESUME: | ||
669 | mxs_dma_resume_chan(mxs_chan); | ||
670 | break; | ||
671 | default: | ||
672 | ret = -ENOSYS; | ||
673 | } | ||
674 | 666 | ||
675 | return ret; | 667 | return 0; |
676 | } | 668 | } |
677 | 669 | ||
678 | static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, | 670 | static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, |
@@ -701,13 +693,6 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, | |||
701 | return mxs_chan->status; | 693 | return mxs_chan->status; |
702 | } | 694 | } |
703 | 695 | ||
704 | static void mxs_dma_issue_pending(struct dma_chan *chan) | ||
705 | { | ||
706 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
707 | |||
708 | mxs_dma_enable_chan(mxs_chan); | ||
709 | } | ||
710 | |||
711 | static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) | 696 | static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) |
712 | { | 697 | { |
713 | int ret; | 698 | int ret; |
@@ -860,8 +845,14 @@ static int __init mxs_dma_probe(struct platform_device *pdev) | |||
860 | mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status; | 845 | mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status; |
861 | mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg; | 846 | mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg; |
862 | mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic; | 847 | mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic; |
863 | mxs_dma->dma_device.device_control = mxs_dma_control; | 848 | mxs_dma->dma_device.device_pause = mxs_dma_pause_chan; |
864 | mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending; | 849 | mxs_dma->dma_device.device_resume = mxs_dma_resume_chan; |
850 | mxs_dma->dma_device.device_terminate_all = mxs_dma_terminate_all; | ||
851 | mxs_dma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | ||
852 | mxs_dma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | ||
853 | mxs_dma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
854 | mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
855 | mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan; | ||
865 | 856 | ||
866 | ret = dma_async_device_register(&mxs_dma->dma_device); | 857 | ret = dma_async_device_register(&mxs_dma->dma_device); |
867 | if (ret) { | 858 | if (ret) { |
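mxs-dma additionally turns its pause/resume helpers from void into int-returning callbacks, which is the signature the generic dmaengine_pause()/dmaengine_resume() wrappers propagate to callers. From the client side, hedged as a sketch (foo_pause_window() is a stand-in, not a real consumer):

	#include <linux/dmaengine.h>

	static int foo_pause_window(struct dma_chan *chan)
	{
		int ret;

		ret = dmaengine_pause(chan);	/* dispatched to device_pause */
		if (ret)
			return ret;

		/* ... e.g. read the residue, drain a peripheral FIFO ... */

		return dmaengine_resume(chan);	/* dispatched to device_resume */
	}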
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index d7d61e1a01c3..88b77c98365d 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c | |||
@@ -504,7 +504,7 @@ static int nbpf_prep_one(struct nbpf_link_desc *ldesc, | |||
504 | * pauses DMA and reads out data received via DMA as well as those left | 504 | * pauses DMA and reads out data received via DMA as well as those left |
505 | * in the Rx FIFO. For this to work with the RAM side using burst | 505 | * in the Rx FIFO. For this to work with the RAM side using burst |
506 | * transfers we enable the SBE bit and terminate the transfer in our | 506 | * transfers we enable the SBE bit and terminate the transfer in our |
507 | * DMA_PAUSE handler. | 507 | * .device_pause handler. |
508 | */ | 508 | */ |
509 | mem_xfer = nbpf_xfer_ds(chan->nbpf, size); | 509 | mem_xfer = nbpf_xfer_ds(chan->nbpf, size); |
510 | 510 | ||
@@ -565,13 +565,6 @@ static void nbpf_configure(struct nbpf_device *nbpf) | |||
565 | nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT); | 565 | nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT); |
566 | } | 566 | } |
567 | 567 | ||
568 | static void nbpf_pause(struct nbpf_channel *chan) | ||
569 | { | ||
570 | nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS); | ||
571 | /* See comment in nbpf_prep_one() */ | ||
572 | nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); | ||
573 | } | ||
574 | |||
575 | /* Generic part */ | 568 | /* Generic part */ |
576 | 569 | ||
577 | /* DMA ENGINE functions */ | 570 | /* DMA ENGINE functions */ |
@@ -837,54 +830,58 @@ static void nbpf_chan_idle(struct nbpf_channel *chan) | |||
837 | } | 830 | } |
838 | } | 831 | } |
839 | 832 | ||
840 | static int nbpf_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, | 833 | static int nbpf_pause(struct dma_chan *dchan) |
841 | unsigned long arg) | ||
842 | { | 834 | { |
843 | struct nbpf_channel *chan = nbpf_to_chan(dchan); | 835 | struct nbpf_channel *chan = nbpf_to_chan(dchan); |
844 | struct dma_slave_config *config; | ||
845 | 836 | ||
846 | dev_dbg(dchan->device->dev, "Entry %s(%d)\n", __func__, cmd); | 837 | dev_dbg(dchan->device->dev, "Entry %s\n", __func__); |
847 | 838 | ||
848 | switch (cmd) { | 839 | chan->paused = true; |
849 | case DMA_TERMINATE_ALL: | 840 | nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS); |
850 | dev_dbg(dchan->device->dev, "Terminating\n"); | 841 | /* See comment in nbpf_prep_one() */ |
851 | nbpf_chan_halt(chan); | 842 | nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); |
852 | nbpf_chan_idle(chan); | ||
853 | break; | ||
854 | 843 | ||
855 | case DMA_SLAVE_CONFIG: | 844 | return 0; |
856 | if (!arg) | 845 | } |
857 | return -EINVAL; | ||
858 | config = (struct dma_slave_config *)arg; | ||
859 | 846 | ||
860 | /* | 847 | static int nbpf_terminate_all(struct dma_chan *dchan) |
861 | * We could check config->slave_id to match chan->terminal here, | 848 | { |
862 | * but with DT they would be coming from the same source, so | 849 | struct nbpf_channel *chan = nbpf_to_chan(dchan); |
863 | * such a check would be superfluous | ||
864 | */ | ||
865 | 850 | ||
866 | chan->slave_dst_addr = config->dst_addr; | 851 | dev_dbg(dchan->device->dev, "Entry %s\n", __func__); |
867 | chan->slave_dst_width = nbpf_xfer_size(chan->nbpf, | 852 | dev_dbg(dchan->device->dev, "Terminating\n"); |
868 | config->dst_addr_width, 1); | ||
869 | chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf, | ||
870 | config->dst_addr_width, | ||
871 | config->dst_maxburst); | ||
872 | chan->slave_src_addr = config->src_addr; | ||
873 | chan->slave_src_width = nbpf_xfer_size(chan->nbpf, | ||
874 | config->src_addr_width, 1); | ||
875 | chan->slave_src_burst = nbpf_xfer_size(chan->nbpf, | ||
876 | config->src_addr_width, | ||
877 | config->src_maxburst); | ||
878 | break; | ||
879 | 853 | ||
880 | case DMA_PAUSE: | 854 | nbpf_chan_halt(chan); |
881 | chan->paused = true; | 855 | nbpf_chan_idle(chan); |
882 | nbpf_pause(chan); | ||
883 | break; | ||
884 | 856 | ||
885 | default: | 857 | return 0; |
886 | return -ENXIO; | 858 | } |
887 | } | 859 | |
860 | static int nbpf_config(struct dma_chan *dchan, | ||
861 | struct dma_slave_config *config) | ||
862 | { | ||
863 | struct nbpf_channel *chan = nbpf_to_chan(dchan); | ||
864 | |||
865 | dev_dbg(dchan->device->dev, "Entry %s\n", __func__); | ||
866 | |||
867 | /* | ||
868 | * We could check config->slave_id to match chan->terminal here, | ||
869 | * but with DT they would be coming from the same source, so | ||
870 | * such a check would be superfluous | ||
871 | */ | ||
872 | |||
873 | chan->slave_dst_addr = config->dst_addr; | ||
874 | chan->slave_dst_width = nbpf_xfer_size(chan->nbpf, | ||
875 | config->dst_addr_width, 1); | ||
876 | chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf, | ||
877 | config->dst_addr_width, | ||
878 | config->dst_maxburst); | ||
879 | chan->slave_src_addr = config->src_addr; | ||
880 | chan->slave_src_width = nbpf_xfer_size(chan->nbpf, | ||
881 | config->src_addr_width, 1); | ||
882 | chan->slave_src_burst = nbpf_xfer_size(chan->nbpf, | ||
883 | config->src_addr_width, | ||
884 | config->src_maxburst); | ||
888 | 885 | ||
889 | return 0; | 886 | return 0; |
890 | } | 887 | } |
@@ -1072,18 +1069,6 @@ static void nbpf_free_chan_resources(struct dma_chan *dchan) | |||
1072 | } | 1069 | } |
1073 | } | 1070 | } |
1074 | 1071 | ||
1075 | static int nbpf_slave_caps(struct dma_chan *dchan, | ||
1076 | struct dma_slave_caps *caps) | ||
1077 | { | ||
1078 | caps->src_addr_widths = NBPF_DMA_BUSWIDTHS; | ||
1079 | caps->dstn_addr_widths = NBPF_DMA_BUSWIDTHS; | ||
1080 | caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
1081 | caps->cmd_pause = false; | ||
1082 | caps->cmd_terminate = true; | ||
1083 | |||
1084 | return 0; | ||
1085 | } | ||
1086 | |||
1087 | static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec, | 1072 | static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec, |
1088 | struct of_dma *ofdma) | 1073 | struct of_dma *ofdma) |
1089 | { | 1074 | { |
@@ -1414,7 +1399,6 @@ static int nbpf_probe(struct platform_device *pdev) | |||
1414 | dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy; | 1399 | dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy; |
1415 | dma_dev->device_tx_status = nbpf_tx_status; | 1400 | dma_dev->device_tx_status = nbpf_tx_status; |
1416 | dma_dev->device_issue_pending = nbpf_issue_pending; | 1401 | dma_dev->device_issue_pending = nbpf_issue_pending; |
1417 | dma_dev->device_slave_caps = nbpf_slave_caps; | ||
1418 | 1402 | ||
1419 | /* | 1403 | /* |
1420 | * If we drop support for unaligned MEMCPY buffer addresses and / or | 1404 | * If we drop support for unaligned MEMCPY buffer addresses and / or |
@@ -1426,7 +1410,13 @@ static int nbpf_probe(struct platform_device *pdev) | |||
1426 | 1410 | ||
1427 | /* Compulsory for DMA_SLAVE fields */ | 1411 | /* Compulsory for DMA_SLAVE fields */ |
1428 | dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg; | 1412 | dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg; |
1429 | dma_dev->device_control = nbpf_control; | 1413 | dma_dev->device_config = nbpf_config; |
1414 | dma_dev->device_pause = nbpf_pause; | ||
1415 | dma_dev->device_terminate_all = nbpf_terminate_all; | ||
1416 | |||
1417 | dma_dev->src_addr_widths = NBPF_DMA_BUSWIDTHS; | ||
1418 | dma_dev->dst_addr_widths = NBPF_DMA_BUSWIDTHS; | ||
1419 | dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
1430 | 1420 | ||
1431 | platform_set_drvdata(pdev, nbpf); | 1421 | platform_set_drvdata(pdev, nbpf); |
1432 | 1422 | ||
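The nbpf rework above is the template the rest of this series follows: the single device_control() dispatcher is split into one callback per operation, and the capability fields move onto struct dma_device so the framework can report them without a driver hook. A minimal sketch of the resulting wiring (the foo_* driver and FOO_DMA_BUSWIDTHS are hypothetical stand-ins, not code from this patch):

    /*
     * Sketch only: foo_* names and FOO_DMA_BUSWIDTHS are hypothetical.
     * Each former DMA_* command becomes its own dma_device callback.
     */
    static int foo_config(struct dma_chan *chan,
                          struct dma_slave_config *config)
    {
            /* stash the configuration for subsequent prep_* calls */
            return 0;
    }

    static int foo_pause(struct dma_chan *chan)
    {
            /* suspend only this channel */
            return 0;
    }

    static int foo_terminate_all(struct dma_chan *chan)
    {
            /* halt the channel and release every queued descriptor */
            return 0;
    }

    static void foo_setup_dma_device(struct dma_device *dd)
    {
            dd->device_config = foo_config;
            dd->device_pause = foo_pause;
            dd->device_terminate_all = foo_terminate_all;

            /* capabilities are now plain fields, not a callback */
            dd->src_addr_widths = FOO_DMA_BUSWIDTHS;
            dd->dst_addr_widths = FOO_DMA_BUSWIDTHS;
            dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
    }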
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index c0016a68b446..7dd6dd121681 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c | |||
@@ -948,8 +948,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( | |||
948 | return vchan_tx_prep(&c->vc, &d->vd, flags); | 948 | return vchan_tx_prep(&c->vc, &d->vd, flags); |
949 | } | 949 | } |
950 | 950 | ||
951 | static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg) | 951 | static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg) |
952 | { | 952 | { |
953 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
954 | |||
953 | if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || | 955 | if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || |
954 | cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) | 956 | cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) |
955 | return -EINVAL; | 957 | return -EINVAL; |
@@ -959,8 +961,9 @@ static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *c | |||
959 | return 0; | 961 | return 0; |
960 | } | 962 | } |
961 | 963 | ||
962 | static int omap_dma_terminate_all(struct omap_chan *c) | 964 | static int omap_dma_terminate_all(struct dma_chan *chan) |
963 | { | 965 | { |
966 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
964 | struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); | 967 | struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); |
965 | unsigned long flags; | 968 | unsigned long flags; |
966 | LIST_HEAD(head); | 969 | LIST_HEAD(head); |
@@ -996,8 +999,10 @@ static int omap_dma_terminate_all(struct omap_chan *c) | |||
996 | return 0; | 999 | return 0; |
997 | } | 1000 | } |
998 | 1001 | ||
999 | static int omap_dma_pause(struct omap_chan *c) | 1002 | static int omap_dma_pause(struct dma_chan *chan) |
1000 | { | 1003 | { |
1004 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
1005 | |||
1001 | /* Pause/Resume only allowed with cyclic mode */ | 1006 | /* Pause/Resume only allowed with cyclic mode */ |
1002 | if (!c->cyclic) | 1007 | if (!c->cyclic) |
1003 | return -EINVAL; | 1008 | return -EINVAL; |
@@ -1010,8 +1015,10 @@ static int omap_dma_pause(struct omap_chan *c) | |||
1010 | return 0; | 1015 | return 0; |
1011 | } | 1016 | } |
1012 | 1017 | ||
1013 | static int omap_dma_resume(struct omap_chan *c) | 1018 | static int omap_dma_resume(struct dma_chan *chan) |
1014 | { | 1019 | { |
1020 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
1021 | |||
1015 | /* Pause/Resume only allowed with cyclic mode */ | 1022 | /* Pause/Resume only allowed with cyclic mode */ |
1016 | if (!c->cyclic) | 1023 | if (!c->cyclic) |
1017 | return -EINVAL; | 1024 | return -EINVAL; |
@@ -1029,37 +1036,6 @@ static int omap_dma_resume(struct omap_chan *c) | |||
1029 | return 0; | 1036 | return 0; |
1030 | } | 1037 | } |
1031 | 1038 | ||
1032 | static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
1033 | unsigned long arg) | ||
1034 | { | ||
1035 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
1036 | int ret; | ||
1037 | |||
1038 | switch (cmd) { | ||
1039 | case DMA_SLAVE_CONFIG: | ||
1040 | ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg); | ||
1041 | break; | ||
1042 | |||
1043 | case DMA_TERMINATE_ALL: | ||
1044 | ret = omap_dma_terminate_all(c); | ||
1045 | break; | ||
1046 | |||
1047 | case DMA_PAUSE: | ||
1048 | ret = omap_dma_pause(c); | ||
1049 | break; | ||
1050 | |||
1051 | case DMA_RESUME: | ||
1052 | ret = omap_dma_resume(c); | ||
1053 | break; | ||
1054 | |||
1055 | default: | ||
1056 | ret = -ENXIO; | ||
1057 | break; | ||
1058 | } | ||
1059 | |||
1060 | return ret; | ||
1061 | } | ||
1062 | |||
1063 | static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) | 1039 | static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) |
1064 | { | 1040 | { |
1065 | struct omap_chan *c; | 1041 | struct omap_chan *c; |
@@ -1094,19 +1070,6 @@ static void omap_dma_free(struct omap_dmadev *od) | |||
1094 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | 1070 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ |
1095 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) | 1071 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) |
1096 | 1072 | ||
1097 | static int omap_dma_device_slave_caps(struct dma_chan *dchan, | ||
1098 | struct dma_slave_caps *caps) | ||
1099 | { | ||
1100 | caps->src_addr_widths = OMAP_DMA_BUSWIDTHS; | ||
1101 | caps->dstn_addr_widths = OMAP_DMA_BUSWIDTHS; | ||
1102 | caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
1103 | caps->cmd_pause = true; | ||
1104 | caps->cmd_terminate = true; | ||
1105 | caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
1106 | |||
1107 | return 0; | ||
1108 | } | ||
1109 | |||
1110 | static int omap_dma_probe(struct platform_device *pdev) | 1073 | static int omap_dma_probe(struct platform_device *pdev) |
1111 | { | 1074 | { |
1112 | struct omap_dmadev *od; | 1075 | struct omap_dmadev *od; |
@@ -1136,8 +1099,14 @@ static int omap_dma_probe(struct platform_device *pdev) | |||
1136 | od->ddev.device_issue_pending = omap_dma_issue_pending; | 1099 | od->ddev.device_issue_pending = omap_dma_issue_pending; |
1137 | od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; | 1100 | od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; |
1138 | od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; | 1101 | od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; |
1139 | od->ddev.device_control = omap_dma_control; | 1102 | od->ddev.device_config = omap_dma_slave_config; |
1140 | od->ddev.device_slave_caps = omap_dma_device_slave_caps; | 1103 | od->ddev.device_pause = omap_dma_pause; |
1104 | od->ddev.device_resume = omap_dma_resume; | ||
1105 | od->ddev.device_terminate_all = omap_dma_terminate_all; | ||
1106 | od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS; | ||
1107 | od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS; | ||
1108 | od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
1109 | od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
1141 | od->ddev.dev = &pdev->dev; | 1110 | od->ddev.dev = &pdev->dev; |
1142 | INIT_LIST_HEAD(&od->ddev.channels); | 1111 | INIT_LIST_HEAD(&od->ddev.channels); |
1143 | INIT_LIST_HEAD(&od->pending); | 1112 | INIT_LIST_HEAD(&od->pending); |
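Every reworked callback in omap-dma now takes the generic struct dma_chan * and recovers the driver channel with a container_of() helper on entry, instead of being handed the private type by the old dispatcher. The helper pattern, sketched with hypothetical names:

    /* Sketch with hypothetical names; to_omap_dma_chan() above works
     * the same way. */
    struct foo_chan {
            struct dma_chan chan;   /* generic channel embedded first */
            /* driver-private state ... */
    };

    static inline struct foo_chan *to_foo_chan(struct dma_chan *c)
    {
            /* valid because struct foo_chan embeds 'chan' */
            return container_of(c, struct foo_chan, chan);
    }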
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 6e0e47d76b23..35c143cb88da 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -665,16 +665,12 @@ err_desc_get: | |||
665 | return NULL; | 665 | return NULL; |
666 | } | 666 | } |
667 | 667 | ||
668 | static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 668 | static int pd_device_terminate_all(struct dma_chan *chan) |
669 | unsigned long arg) | ||
670 | { | 669 | { |
671 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); | 670 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); |
672 | struct pch_dma_desc *desc, *_d; | 671 | struct pch_dma_desc *desc, *_d; |
673 | LIST_HEAD(list); | 672 | LIST_HEAD(list); |
674 | 673 | ||
675 | if (cmd != DMA_TERMINATE_ALL) | ||
676 | return -ENXIO; | ||
677 | |||
678 | spin_lock_irq(&pd_chan->lock); | 674 | spin_lock_irq(&pd_chan->lock); |
679 | 675 | ||
680 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE); | 676 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE); |
@@ -932,7 +928,7 @@ static int pch_dma_probe(struct pci_dev *pdev, | |||
932 | pd->dma.device_tx_status = pd_tx_status; | 928 | pd->dma.device_tx_status = pd_tx_status; |
933 | pd->dma.device_issue_pending = pd_issue_pending; | 929 | pd->dma.device_issue_pending = pd_issue_pending; |
934 | pd->dma.device_prep_slave_sg = pd_prep_slave_sg; | 930 | pd->dma.device_prep_slave_sg = pd_prep_slave_sg; |
935 | pd->dma.device_control = pd_device_control; | 931 | pd->dma.device_terminate_all = pd_device_terminate_all; |
936 | 932 | ||
937 | err = dma_async_device_register(&pd->dma); | 933 | err = dma_async_device_register(&pd->dma); |
938 | if (err) { | 934 | if (err) { |
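pch_dma only ever supported DMA_TERMINATE_ALL, so after the conversion it simply leaves device_pause and device_resume unset; which callbacks a driver populates is what the framework can expose as channel capabilities. A client could check before relying on pause, roughly (a sketch using the generic caps query):

    /* Sketch: query capabilities instead of assuming pause works */
    static bool foo_chan_can_pause(struct dma_chan *chan)
    {
            struct dma_slave_caps caps;

            if (dma_get_slave_caps(chan, &caps))
                    return false;   /* caps not available */

            return caps.cmd_pause;
    }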
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index bdf40b530032..027f1d7ea4fc 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -2086,78 +2086,63 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) | |||
2086 | return 1; | 2086 | return 1; |
2087 | } | 2087 | } |
2088 | 2088 | ||
2089 | static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) | 2089 | static int pl330_config(struct dma_chan *chan, |
2090 | struct dma_slave_config *slave_config) | ||
2091 | { | ||
2092 | struct dma_pl330_chan *pch = to_pchan(chan); | ||
2093 | |||
2094 | if (slave_config->direction == DMA_MEM_TO_DEV) { | ||
2095 | if (slave_config->dst_addr) | ||
2096 | pch->fifo_addr = slave_config->dst_addr; | ||
2097 | if (slave_config->dst_addr_width) | ||
2098 | pch->burst_sz = __ffs(slave_config->dst_addr_width); | ||
2099 | if (slave_config->dst_maxburst) | ||
2100 | pch->burst_len = slave_config->dst_maxburst; | ||
2101 | } else if (slave_config->direction == DMA_DEV_TO_MEM) { | ||
2102 | if (slave_config->src_addr) | ||
2103 | pch->fifo_addr = slave_config->src_addr; | ||
2104 | if (slave_config->src_addr_width) | ||
2105 | pch->burst_sz = __ffs(slave_config->src_addr_width); | ||
2106 | if (slave_config->src_maxburst) | ||
2107 | pch->burst_len = slave_config->src_maxburst; | ||
2108 | } | ||
2109 | |||
2110 | return 0; | ||
2111 | } | ||
2112 | |||
2113 | static int pl330_terminate_all(struct dma_chan *chan) | ||
2090 | { | 2114 | { |
2091 | struct dma_pl330_chan *pch = to_pchan(chan); | 2115 | struct dma_pl330_chan *pch = to_pchan(chan); |
2092 | struct dma_pl330_desc *desc; | 2116 | struct dma_pl330_desc *desc; |
2093 | unsigned long flags; | 2117 | unsigned long flags; |
2094 | struct pl330_dmac *pl330 = pch->dmac; | 2118 | struct pl330_dmac *pl330 = pch->dmac; |
2095 | struct dma_slave_config *slave_config; | ||
2096 | LIST_HEAD(list); | 2119 | LIST_HEAD(list); |
2097 | 2120 | ||
2098 | switch (cmd) { | 2121 | spin_lock_irqsave(&pch->lock, flags); |
2099 | case DMA_TERMINATE_ALL: | 2122 | spin_lock(&pl330->lock); |
2100 | pm_runtime_get_sync(pl330->ddma.dev); | 2123 | _stop(pch->thread); |
2101 | spin_lock_irqsave(&pch->lock, flags); | 2124 | spin_unlock(&pl330->lock); |
2102 | 2125 | ||
2103 | spin_lock(&pl330->lock); | 2126 | pch->thread->req[0].desc = NULL; |
2104 | _stop(pch->thread); | 2127 | pch->thread->req[1].desc = NULL; |
2105 | spin_unlock(&pl330->lock); | 2128 | pch->thread->req_running = -1; |
2106 | 2129 | ||
2107 | pch->thread->req[0].desc = NULL; | 2130 | /* Mark all desc done */ |
2108 | pch->thread->req[1].desc = NULL; | 2131 | list_for_each_entry(desc, &pch->submitted_list, node) { |
2109 | pch->thread->req_running = -1; | 2132 | desc->status = FREE; |
2110 | 2133 | dma_cookie_complete(&desc->txd); | |
2111 | /* Mark all desc done */ | 2134 | } |
2112 | list_for_each_entry(desc, &pch->submitted_list, node) { | ||
2113 | desc->status = FREE; | ||
2114 | dma_cookie_complete(&desc->txd); | ||
2115 | } | ||
2116 | |||
2117 | list_for_each_entry(desc, &pch->work_list , node) { | ||
2118 | desc->status = FREE; | ||
2119 | dma_cookie_complete(&desc->txd); | ||
2120 | } | ||
2121 | |||
2122 | list_for_each_entry(desc, &pch->completed_list , node) { | ||
2123 | desc->status = FREE; | ||
2124 | dma_cookie_complete(&desc->txd); | ||
2125 | } | ||
2126 | |||
2127 | if (!list_empty(&pch->work_list)) | ||
2128 | pm_runtime_put(pl330->ddma.dev); | ||
2129 | 2135 | ||
2130 | list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool); | 2136 | list_for_each_entry(desc, &pch->work_list , node) { |
2131 | list_splice_tail_init(&pch->work_list, &pl330->desc_pool); | 2137 | desc->status = FREE; |
2132 | list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); | 2138 | dma_cookie_complete(&desc->txd); |
2133 | spin_unlock_irqrestore(&pch->lock, flags); | ||
2134 | pm_runtime_mark_last_busy(pl330->ddma.dev); | ||
2135 | pm_runtime_put_autosuspend(pl330->ddma.dev); | ||
2136 | break; | ||
2137 | case DMA_SLAVE_CONFIG: | ||
2138 | slave_config = (struct dma_slave_config *)arg; | ||
2139 | |||
2140 | if (slave_config->direction == DMA_MEM_TO_DEV) { | ||
2141 | if (slave_config->dst_addr) | ||
2142 | pch->fifo_addr = slave_config->dst_addr; | ||
2143 | if (slave_config->dst_addr_width) | ||
2144 | pch->burst_sz = __ffs(slave_config->dst_addr_width); | ||
2145 | if (slave_config->dst_maxburst) | ||
2146 | pch->burst_len = slave_config->dst_maxburst; | ||
2147 | } else if (slave_config->direction == DMA_DEV_TO_MEM) { | ||
2148 | if (slave_config->src_addr) | ||
2149 | pch->fifo_addr = slave_config->src_addr; | ||
2150 | if (slave_config->src_addr_width) | ||
2151 | pch->burst_sz = __ffs(slave_config->src_addr_width); | ||
2152 | if (slave_config->src_maxburst) | ||
2153 | pch->burst_len = slave_config->src_maxburst; | ||
2154 | } | ||
2155 | break; | ||
2156 | default: | ||
2157 | dev_err(pch->dmac->ddma.dev, "Unsupported command.\n"); | ||
2158 | return -ENXIO; | ||
2159 | } | 2139 | } |
2160 | 2140 | ||
2141 | list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool); | ||
2142 | list_splice_tail_init(&pch->work_list, &pl330->desc_pool); | ||
2143 | list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); | ||
2144 | spin_unlock_irqrestore(&pch->lock, flags); | ||
2145 | |||
2161 | return 0; | 2146 | return 0; |
2162 | } | 2147 | } |
2163 | 2148 | ||
@@ -2623,19 +2608,6 @@ static irqreturn_t pl330_irq_handler(int irq, void *data) | |||
2623 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ | 2608 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ |
2624 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) | 2609 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) |
2625 | 2610 | ||
2626 | static int pl330_dma_device_slave_caps(struct dma_chan *dchan, | ||
2627 | struct dma_slave_caps *caps) | ||
2628 | { | ||
2629 | caps->src_addr_widths = PL330_DMA_BUSWIDTHS; | ||
2630 | caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS; | ||
2631 | caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
2632 | caps->cmd_pause = false; | ||
2633 | caps->cmd_terminate = true; | ||
2634 | caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; | ||
2635 | |||
2636 | return 0; | ||
2637 | } | ||
2638 | |||
2639 | /* | 2611 | /* |
2640 | * Runtime PM callbacks are provided by amba/bus.c driver. | 2612 | * Runtime PM callbacks are provided by amba/bus.c driver. |
2641 | * | 2613 | * |
@@ -2793,9 +2765,13 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2793 | pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic; | 2765 | pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic; |
2794 | pd->device_tx_status = pl330_tx_status; | 2766 | pd->device_tx_status = pl330_tx_status; |
2795 | pd->device_prep_slave_sg = pl330_prep_slave_sg; | 2767 | pd->device_prep_slave_sg = pl330_prep_slave_sg; |
2796 | pd->device_control = pl330_control; | 2768 | pd->device_config = pl330_config; |
2769 | pd->device_terminate_all = pl330_terminate_all; | ||
2797 | pd->device_issue_pending = pl330_issue_pending; | 2770 | pd->device_issue_pending = pl330_issue_pending; |
2798 | pd->device_slave_caps = pl330_dma_device_slave_caps; | 2771 | pd->src_addr_widths = PL330_DMA_BUSWIDTHS; |
2772 | pd->dst_addr_widths = PL330_DMA_BUSWIDTHS; | ||
2773 | pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
2774 | pd->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; | ||
2799 | 2775 | ||
2800 | ret = dma_async_device_register(pd); | 2776 | ret = dma_async_device_register(pd); |
2801 | if (ret) { | 2777 | if (ret) { |
@@ -2847,7 +2823,7 @@ probe_err3: | |||
2847 | 2823 | ||
2848 | /* Flush the channel */ | 2824 | /* Flush the channel */ |
2849 | if (pch->thread) { | 2825 | if (pch->thread) { |
2850 | pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); | 2826 | pl330_terminate_all(&pch->chan); |
2851 | pl330_free_chan_resources(&pch->chan); | 2827 | pl330_free_chan_resources(&pch->chan); |
2852 | } | 2828 | } |
2853 | } | 2829 | } |
@@ -2878,7 +2854,7 @@ static int pl330_remove(struct amba_device *adev) | |||
2878 | 2854 | ||
2879 | /* Flush the channel */ | 2855 | /* Flush the channel */ |
2880 | if (pch->thread) { | 2856 | if (pch->thread) { |
2881 | pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); | 2857 | pl330_terminate_all(&pch->chan); |
2882 | pl330_free_chan_resources(&pch->chan); | 2858 | pl330_free_chan_resources(&pch->chan); |
2883 | } | 2859 | } |
2884 | } | 2860 | } |
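Nothing changes for pl330 clients: configuration still flows through dmaengine_slave_config(), which now lands in pl330_config() rather than the old DMA_SLAVE_CONFIG branch. A typical setup for a memory-to-device channel might look like this (addresses and burst values purely illustrative):

    /* Sketch: fifo_addr is a hypothetical device FIFO bus address */
    static int foo_setup_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
    {
            struct dma_slave_config cfg = {
                    .direction = DMA_MEM_TO_DEV, /* pl330_config() keys off this */
                    .dst_addr = fifo_addr,
                    .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                    .dst_maxburst = 8,
            };

            return dmaengine_slave_config(chan, &cfg);
    }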
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c index 3122a99ec06b..d7a33b3ac466 100644 --- a/drivers/dma/qcom_bam_dma.c +++ b/drivers/dma/qcom_bam_dma.c | |||
@@ -530,11 +530,18 @@ static void bam_free_chan(struct dma_chan *chan) | |||
530 | * Sets slave configuration for channel | 530 | * Sets slave configuration for channel |
531 | * | 531 | * |
532 | */ | 532 | */ |
533 | static void bam_slave_config(struct bam_chan *bchan, | 533 | static int bam_slave_config(struct dma_chan *chan, |
534 | struct dma_slave_config *cfg) | 534 | struct dma_slave_config *cfg) |
535 | { | 535 | { |
536 | struct bam_chan *bchan = to_bam_chan(chan); | ||
537 | unsigned long flag; | ||
538 | |||
539 | spin_lock_irqsave(&bchan->vc.lock, flag); | ||
536 | memcpy(&bchan->slave, cfg, sizeof(*cfg)); | 540 | memcpy(&bchan->slave, cfg, sizeof(*cfg)); |
537 | bchan->reconfigure = 1; | 541 | bchan->reconfigure = 1; |
542 | spin_unlock_irqrestore(&bchan->vc.lock, flag); | ||
543 | |||
544 | return 0; | ||
538 | } | 545 | } |
539 | 546 | ||
540 | /** | 547 | /** |
@@ -627,8 +634,9 @@ err_out: | |||
627 | * No callbacks are done | 634 | * No callbacks are done |
628 | * | 635 | * |
629 | */ | 636 | */ |
630 | static void bam_dma_terminate_all(struct bam_chan *bchan) | 637 | static int bam_dma_terminate_all(struct dma_chan *chan) |
631 | { | 638 | { |
639 | struct bam_chan *bchan = to_bam_chan(chan); | ||
632 | unsigned long flag; | 640 | unsigned long flag; |
633 | LIST_HEAD(head); | 641 | LIST_HEAD(head); |
634 | 642 | ||
@@ -643,56 +651,46 @@ static void bam_dma_terminate_all(struct bam_chan *bchan) | |||
643 | spin_unlock_irqrestore(&bchan->vc.lock, flag); | 651 | spin_unlock_irqrestore(&bchan->vc.lock, flag); |
644 | 652 | ||
645 | vchan_dma_desc_free_list(&bchan->vc, &head); | 653 | vchan_dma_desc_free_list(&bchan->vc, &head); |
654 | |||
655 | return 0; | ||
646 | } | 656 | } |
647 | 657 | ||
648 | /** | 658 | /** |
649 | * bam_control - DMA device control | 659 | * bam_pause - Pause DMA channel |
650 | * @chan: dma channel | 660 | * @chan: dma channel |
651 | * @cmd: control cmd | ||
652 | * @arg: cmd argument | ||
653 | * | 661 | * |
654 | * Perform DMA control command | 662 | */ |
663 | static int bam_pause(struct dma_chan *chan) | ||
664 | { | ||
665 | struct bam_chan *bchan = to_bam_chan(chan); | ||
666 | struct bam_device *bdev = bchan->bdev; | ||
667 | unsigned long flag; | ||
668 | |||
669 | spin_lock_irqsave(&bchan->vc.lock, flag); | ||
670 | writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); | ||
671 | bchan->paused = 1; | ||
672 | spin_unlock_irqrestore(&bchan->vc.lock, flag); | ||
673 | |||
674 | return 0; | ||
675 | } | ||
676 | |||
677 | /** | ||
678 | * bam_resume - Resume DMA channel operations | ||
679 | * @chan: dma channel | ||
655 | * | 680 | * |
656 | */ | 681 | */ |
657 | static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 682 | static int bam_resume(struct dma_chan *chan) |
658 | unsigned long arg) | ||
659 | { | 683 | { |
660 | struct bam_chan *bchan = to_bam_chan(chan); | 684 | struct bam_chan *bchan = to_bam_chan(chan); |
661 | struct bam_device *bdev = bchan->bdev; | 685 | struct bam_device *bdev = bchan->bdev; |
662 | int ret = 0; | ||
663 | unsigned long flag; | 686 | unsigned long flag; |
664 | 687 | ||
665 | switch (cmd) { | 688 | spin_lock_irqsave(&bchan->vc.lock, flag); |
666 | case DMA_PAUSE: | 689 | writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT)); |
667 | spin_lock_irqsave(&bchan->vc.lock, flag); | 690 | bchan->paused = 0; |
668 | writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); | 691 | spin_unlock_irqrestore(&bchan->vc.lock, flag); |
669 | bchan->paused = 1; | ||
670 | spin_unlock_irqrestore(&bchan->vc.lock, flag); | ||
671 | break; | ||
672 | |||
673 | case DMA_RESUME: | ||
674 | spin_lock_irqsave(&bchan->vc.lock, flag); | ||
675 | writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT)); | ||
676 | bchan->paused = 0; | ||
677 | spin_unlock_irqrestore(&bchan->vc.lock, flag); | ||
678 | break; | ||
679 | |||
680 | case DMA_TERMINATE_ALL: | ||
681 | bam_dma_terminate_all(bchan); | ||
682 | break; | ||
683 | |||
684 | case DMA_SLAVE_CONFIG: | ||
685 | spin_lock_irqsave(&bchan->vc.lock, flag); | ||
686 | bam_slave_config(bchan, (struct dma_slave_config *)arg); | ||
687 | spin_unlock_irqrestore(&bchan->vc.lock, flag); | ||
688 | break; | ||
689 | |||
690 | default: | ||
691 | ret = -ENXIO; | ||
692 | break; | ||
693 | } | ||
694 | 692 | ||
695 | return ret; | 693 | return 0; |
696 | } | 694 | } |
697 | 695 | ||
698 | /** | 696 | /** |
@@ -1148,7 +1146,10 @@ static int bam_dma_probe(struct platform_device *pdev) | |||
1148 | bdev->common.device_alloc_chan_resources = bam_alloc_chan; | 1146 | bdev->common.device_alloc_chan_resources = bam_alloc_chan; |
1149 | bdev->common.device_free_chan_resources = bam_free_chan; | 1147 | bdev->common.device_free_chan_resources = bam_free_chan; |
1150 | bdev->common.device_prep_slave_sg = bam_prep_slave_sg; | 1148 | bdev->common.device_prep_slave_sg = bam_prep_slave_sg; |
1151 | bdev->common.device_control = bam_control; | 1149 | bdev->common.device_config = bam_slave_config; |
1150 | bdev->common.device_pause = bam_pause; | ||
1151 | bdev->common.device_resume = bam_resume; | ||
1152 | bdev->common.device_terminate_all = bam_dma_terminate_all; | ||
1152 | bdev->common.device_issue_pending = bam_issue_pending; | 1153 | bdev->common.device_issue_pending = bam_issue_pending; |
1153 | bdev->common.device_tx_status = bam_tx_status; | 1154 | bdev->common.device_tx_status = bam_tx_status; |
1154 | bdev->common.dev = bdev->dev; | 1155 | bdev->common.dev = bdev->dev; |
@@ -1187,7 +1188,7 @@ static int bam_dma_remove(struct platform_device *pdev) | |||
1187 | devm_free_irq(bdev->dev, bdev->irq, bdev); | 1188 | devm_free_irq(bdev->dev, bdev->irq, bdev); |
1188 | 1189 | ||
1189 | for (i = 0; i < bdev->num_channels; i++) { | 1190 | for (i = 0; i < bdev->num_channels; i++) { |
1190 | bam_dma_terminate_all(&bdev->channels[i]); | 1191 | bam_dma_terminate_all(&bdev->channels[i].vc.chan); |
1191 | tasklet_kill(&bdev->channels[i].vc.task); | 1192 | tasklet_kill(&bdev->channels[i].vc.task); |
1192 | 1193 | ||
1193 | dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, | 1194 | dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, |
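Note that bam_slave_config() now takes vc.lock itself: under device_control the dispatcher held the lock around the call, so each standalone callback must provide its own serialization against the interrupt path. The shape of that locking, sketched with hypothetical names:

    /* Sketch: standalone callbacks serialize themselves */
    static int foo_pause(struct dma_chan *chan)
    {
            struct foo_chan *fchan = to_foo_chan(chan); /* hypothetical */
            unsigned long flags;

            spin_lock_irqsave(&fchan->lock, flags);
            /* poke the hardware and flag the state under the lock */
            fchan->paused = true;
            spin_unlock_irqrestore(&fchan->lock, flags);

            return 0;
    }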
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 6941a77521c3..4d5a84815ba7 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c | |||
@@ -384,20 +384,30 @@ static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan) | |||
384 | return tc * txd->width; | 384 | return tc * txd->width; |
385 | } | 385 | } |
386 | 386 | ||
387 | static int s3c24xx_dma_set_runtime_config(struct s3c24xx_dma_chan *s3cchan, | 387 | static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan, |
388 | struct dma_slave_config *config) | 388 | struct dma_slave_config *config) |
389 | { | 389 | { |
390 | if (!s3cchan->slave) | 390 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); |
391 | return -EINVAL; | 391 | unsigned long flags; |
392 | int ret = 0; | ||
392 | 393 | ||
393 | /* Reject definitely invalid configurations */ | 394 | /* Reject definitely invalid configurations */ |
394 | if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || | 395 | if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || |
395 | config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) | 396 | config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) |
396 | return -EINVAL; | 397 | return -EINVAL; |
397 | 398 | ||
399 | spin_lock_irqsave(&s3cchan->vc.lock, flags); | ||
400 | |||
401 | if (!s3cchan->slave) { | ||
402 | ret = -EINVAL; | ||
403 | goto out; | ||
404 | } | ||
405 | |||
398 | s3cchan->cfg = *config; | 406 | s3cchan->cfg = *config; |
399 | 407 | ||
400 | return 0; | 408 | out: |
409 | spin_unlock_irqrestore(&s3cchan->vc.lock, flags); | ||
410 | return ret; | ||
401 | } | 411 | } |
402 | 412 | ||
403 | /* | 413 | /* |
@@ -703,53 +713,38 @@ static irqreturn_t s3c24xx_dma_irq(int irq, void *data) | |||
703 | * The DMA ENGINE API | 713 | * The DMA ENGINE API |
704 | */ | 714 | */ |
705 | 715 | ||
706 | static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 716 | static int s3c24xx_dma_terminate_all(struct dma_chan *chan) |
707 | unsigned long arg) | ||
708 | { | 717 | { |
709 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); | 718 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); |
710 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | 719 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; |
711 | unsigned long flags; | 720 | unsigned long flags; |
712 | int ret = 0; | ||
713 | 721 | ||
714 | spin_lock_irqsave(&s3cchan->vc.lock, flags); | 722 | spin_lock_irqsave(&s3cchan->vc.lock, flags); |
715 | 723 | ||
716 | switch (cmd) { | 724 | if (!s3cchan->phy && !s3cchan->at) { |
717 | case DMA_SLAVE_CONFIG: | 725 | dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n", |
718 | ret = s3c24xx_dma_set_runtime_config(s3cchan, | 726 | s3cchan->id); |
719 | (struct dma_slave_config *)arg); | 727 | return -EINVAL; |
720 | break; | 728 | } |
721 | case DMA_TERMINATE_ALL: | ||
722 | if (!s3cchan->phy && !s3cchan->at) { | ||
723 | dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n", | ||
724 | s3cchan->id); | ||
725 | ret = -EINVAL; | ||
726 | break; | ||
727 | } | ||
728 | |||
729 | s3cchan->state = S3C24XX_DMA_CHAN_IDLE; | ||
730 | 729 | ||
731 | /* Mark physical channel as free */ | 730 | s3cchan->state = S3C24XX_DMA_CHAN_IDLE; |
732 | if (s3cchan->phy) | ||
733 | s3c24xx_dma_phy_free(s3cchan); | ||
734 | 731 | ||
735 | /* Dequeue current job */ | 732 | /* Mark physical channel as free */ |
736 | if (s3cchan->at) { | 733 | if (s3cchan->phy) |
737 | s3c24xx_dma_desc_free(&s3cchan->at->vd); | 734 | s3c24xx_dma_phy_free(s3cchan); |
738 | s3cchan->at = NULL; | ||
739 | } | ||
740 | 735 | ||
741 | /* Dequeue jobs not yet fired as well */ | 736 | /* Dequeue current job */ |
742 | s3c24xx_dma_free_txd_list(s3cdma, s3cchan); | 737 | if (s3cchan->at) { |
743 | break; | 738 | s3c24xx_dma_desc_free(&s3cchan->at->vd); |
744 | default: | 739 | s3cchan->at = NULL; |
745 | /* Unknown command */ | ||
746 | ret = -ENXIO; | ||
747 | break; | ||
748 | } | 740 | } |
749 | 741 | ||
742 | /* Dequeue jobs not yet fired as well */ | ||
743 | s3c24xx_dma_free_txd_list(s3cdma, s3cchan); | ||
744 | |||
750 | spin_unlock_irqrestore(&s3cchan->vc.lock, flags); | 745 | spin_unlock_irqrestore(&s3cchan->vc.lock, flags); |
751 | 746 | ||
752 | return ret; | 747 | return 0; |
753 | } | 748 | } |
754 | 749 | ||
755 | static int s3c24xx_dma_alloc_chan_resources(struct dma_chan *chan) | 750 | static int s3c24xx_dma_alloc_chan_resources(struct dma_chan *chan) |
@@ -1300,7 +1295,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) | |||
1300 | s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy; | 1295 | s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy; |
1301 | s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status; | 1296 | s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status; |
1302 | s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending; | 1297 | s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending; |
1303 | s3cdma->memcpy.device_control = s3c24xx_dma_control; | 1298 | s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config; |
1299 | s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all; | ||
1304 | 1300 | ||
1305 | /* Initialize slave engine for SoC internal dedicated peripherals */ | 1301 | /* Initialize slave engine for SoC internal dedicated peripherals */ |
1306 | dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask); | 1302 | dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask); |
@@ -1315,7 +1311,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) | |||
1315 | s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending; | 1311 | s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending; |
1316 | s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg; | 1312 | s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg; |
1317 | s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic; | 1313 | s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic; |
1318 | s3cdma->slave.device_control = s3c24xx_dma_control; | 1314 | s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config; |
1315 | s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all; | ||
1319 | 1316 | ||
1320 | /* Register as many memcpy channels as there are physical channels */ | 1317 | /* Register as many memcpy channels as there are physical channels */ |
1321 | ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy, | 1318 | ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy, |
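s3c24xx registers two dma_device instances, one for memcpy and one for slave transfers, and points both at the same config and terminate implementations; since the new callbacks depend only on the dma_chan they are handed, sharing them across devices is safe. In sketch form:

    /* Sketch: one callback implementation serving two dma_device
     * instances (names hypothetical) */
    static int foo_set_runtime_config(struct dma_chan *chan,
                                      struct dma_slave_config *config);
    static int foo_terminate_all(struct dma_chan *chan);

    static void foo_wire_common_ops(struct dma_device *memcpy_dev,
                                    struct dma_device *slave_dev)
    {
            memcpy_dev->device_config = foo_set_runtime_config;
            memcpy_dev->device_terminate_all = foo_terminate_all;
            slave_dev->device_config = foo_set_runtime_config;
            slave_dev->device_terminate_all = foo_terminate_all;
    }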
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 96bb62c39c41..5adf5407a8cb 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c | |||
@@ -669,8 +669,10 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic( | |||
669 | return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 669 | return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
670 | } | 670 | } |
671 | 671 | ||
672 | static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg) | 672 | static int sa11x0_dma_device_config(struct dma_chan *chan, |
673 | struct dma_slave_config *cfg) | ||
673 | { | 674 | { |
675 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | ||
674 | u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW); | 676 | u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW); |
675 | dma_addr_t addr; | 677 | dma_addr_t addr; |
676 | enum dma_slave_buswidth width; | 678 | enum dma_slave_buswidth width; |
@@ -704,99 +706,101 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c | |||
704 | return 0; | 706 | return 0; |
705 | } | 707 | } |
706 | 708 | ||
707 | static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 709 | static int sa11x0_dma_device_pause(struct dma_chan *chan) |
708 | unsigned long arg) | ||
709 | { | 710 | { |
710 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | 711 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
711 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); | 712 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); |
712 | struct sa11x0_dma_phy *p; | 713 | struct sa11x0_dma_phy *p; |
713 | LIST_HEAD(head); | 714 | LIST_HEAD(head); |
714 | unsigned long flags; | 715 | unsigned long flags; |
715 | int ret; | ||
716 | 716 | ||
717 | switch (cmd) { | 717 | dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); |
718 | case DMA_SLAVE_CONFIG: | 718 | spin_lock_irqsave(&c->vc.lock, flags); |
719 | return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg); | 719 | if (c->status == DMA_IN_PROGRESS) { |
720 | 720 | c->status = DMA_PAUSED; | |
721 | case DMA_TERMINATE_ALL: | ||
722 | dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); | ||
723 | /* Clear the tx descriptor lists */ | ||
724 | spin_lock_irqsave(&c->vc.lock, flags); | ||
725 | vchan_get_all_descriptors(&c->vc, &head); | ||
726 | 721 | ||
727 | p = c->phy; | 722 | p = c->phy; |
728 | if (p) { | 723 | if (p) { |
729 | dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num); | 724 | writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C); |
730 | /* vchan is assigned to a pchan - stop the channel */ | 725 | } else { |
731 | writel(DCSR_RUN | DCSR_IE | | ||
732 | DCSR_STRTA | DCSR_DONEA | | ||
733 | DCSR_STRTB | DCSR_DONEB, | ||
734 | p->base + DMA_DCSR_C); | ||
735 | |||
736 | if (p->txd_load) { | ||
737 | if (p->txd_load != p->txd_done) | ||
738 | list_add_tail(&p->txd_load->vd.node, &head); | ||
739 | p->txd_load = NULL; | ||
740 | } | ||
741 | if (p->txd_done) { | ||
742 | list_add_tail(&p->txd_done->vd.node, &head); | ||
743 | p->txd_done = NULL; | ||
744 | } | ||
745 | c->phy = NULL; | ||
746 | spin_lock(&d->lock); | 726 | spin_lock(&d->lock); |
747 | p->vchan = NULL; | 727 | list_del_init(&c->node); |
748 | spin_unlock(&d->lock); | 728 | spin_unlock(&d->lock); |
749 | tasklet_schedule(&d->task); | ||
750 | } | 729 | } |
751 | spin_unlock_irqrestore(&c->vc.lock, flags); | 730 | } |
752 | vchan_dma_desc_free_list(&c->vc, &head); | 731 | spin_unlock_irqrestore(&c->vc.lock, flags); |
753 | ret = 0; | ||
754 | break; | ||
755 | 732 | ||
756 | case DMA_PAUSE: | 733 | return 0; |
757 | dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); | 734 | } |
758 | spin_lock_irqsave(&c->vc.lock, flags); | ||
759 | if (c->status == DMA_IN_PROGRESS) { | ||
760 | c->status = DMA_PAUSED; | ||
761 | 735 | ||
762 | p = c->phy; | 736 | static int sa11x0_dma_device_resume(struct dma_chan *chan) |
763 | if (p) { | 737 | { |
764 | writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C); | 738 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
765 | } else { | 739 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); |
766 | spin_lock(&d->lock); | 740 | struct sa11x0_dma_phy *p; |
767 | list_del_init(&c->node); | 741 | LIST_HEAD(head); |
768 | spin_unlock(&d->lock); | 742 | unsigned long flags; |
769 | } | ||
770 | } | ||
771 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
772 | ret = 0; | ||
773 | break; | ||
774 | 743 | ||
775 | case DMA_RESUME: | 744 | dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); |
776 | dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); | 745 | spin_lock_irqsave(&c->vc.lock, flags); |
777 | spin_lock_irqsave(&c->vc.lock, flags); | 746 | if (c->status == DMA_PAUSED) { |
778 | if (c->status == DMA_PAUSED) { | 747 | c->status = DMA_IN_PROGRESS; |
779 | c->status = DMA_IN_PROGRESS; | 748 | |
780 | 749 | p = c->phy; | |
781 | p = c->phy; | 750 | if (p) { |
782 | if (p) { | 751 | writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S); |
783 | writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S); | 752 | } else if (!list_empty(&c->vc.desc_issued)) { |
784 | } else if (!list_empty(&c->vc.desc_issued)) { | 753 | spin_lock(&d->lock); |
785 | spin_lock(&d->lock); | 754 | list_add_tail(&c->node, &d->chan_pending); |
786 | list_add_tail(&c->node, &d->chan_pending); | 755 | spin_unlock(&d->lock); |
787 | spin_unlock(&d->lock); | ||
788 | } | ||
789 | } | 756 | } |
790 | spin_unlock_irqrestore(&c->vc.lock, flags); | 757 | } |
791 | ret = 0; | 758 | spin_unlock_irqrestore(&c->vc.lock, flags); |
792 | break; | ||
793 | 759 | ||
794 | default: | 760 | return 0; |
795 | ret = -ENXIO; | 761 | } |
796 | break; | 762 | |
763 | static int sa11x0_dma_device_terminate_all(struct dma_chan *chan) | ||
764 | { | ||
765 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | ||
766 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); | ||
767 | struct sa11x0_dma_phy *p; | ||
768 | LIST_HEAD(head); | ||
769 | unsigned long flags; | ||
770 | |||
771 | dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); | ||
772 | /* Clear the tx descriptor lists */ | ||
773 | spin_lock_irqsave(&c->vc.lock, flags); | ||
774 | vchan_get_all_descriptors(&c->vc, &head); | ||
775 | |||
776 | p = c->phy; | ||
777 | if (p) { | ||
778 | dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num); | ||
779 | /* vchan is assigned to a pchan - stop the channel */ | ||
780 | writel(DCSR_RUN | DCSR_IE | | ||
781 | DCSR_STRTA | DCSR_DONEA | | ||
782 | DCSR_STRTB | DCSR_DONEB, | ||
783 | p->base + DMA_DCSR_C); | ||
784 | |||
785 | if (p->txd_load) { | ||
786 | if (p->txd_load != p->txd_done) | ||
787 | list_add_tail(&p->txd_load->vd.node, &head); | ||
788 | p->txd_load = NULL; | ||
789 | } | ||
790 | if (p->txd_done) { | ||
791 | list_add_tail(&p->txd_done->vd.node, &head); | ||
792 | p->txd_done = NULL; | ||
793 | } | ||
794 | c->phy = NULL; | ||
795 | spin_lock(&d->lock); | ||
796 | p->vchan = NULL; | ||
797 | spin_unlock(&d->lock); | ||
798 | tasklet_schedule(&d->task); | ||
797 | } | 799 | } |
800 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
801 | vchan_dma_desc_free_list(&c->vc, &head); | ||
798 | 802 | ||
799 | return ret; | 803 | return 0; |
800 | } | 804 | } |
801 | 805 | ||
802 | struct sa11x0_dma_channel_desc { | 806 | struct sa11x0_dma_channel_desc { |
@@ -833,7 +837,10 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev, | |||
833 | dmadev->dev = dev; | 837 | dmadev->dev = dev; |
834 | dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources; | 838 | dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources; |
835 | dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources; | 839 | dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources; |
836 | dmadev->device_control = sa11x0_dma_control; | 840 | dmadev->device_config = sa11x0_dma_device_config; |
841 | dmadev->device_pause = sa11x0_dma_device_pause; | ||
842 | dmadev->device_resume = sa11x0_dma_device_resume; | ||
843 | dmadev->device_terminate_all = sa11x0_dma_device_terminate_all; | ||
837 | dmadev->device_tx_status = sa11x0_dma_tx_status; | 844 | dmadev->device_tx_status = sa11x0_dma_tx_status; |
838 | dmadev->device_issue_pending = sa11x0_dma_issue_pending; | 845 | dmadev->device_issue_pending = sa11x0_dma_issue_pending; |
839 | 846 | ||
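sa11x0 keeps its pause and resume paths symmetric: pause acts only on a channel in DMA_IN_PROGRESS and moves it to DMA_PAUSED, resume does the inverse, so mismatched or repeated calls fall through harmlessly. The state handling, reduced to a skeleton with hypothetical names:

    /* Skeleton of the symmetric pause/resume state check; the status
     * field is assumed to hold DMA_IN_PROGRESS or DMA_PAUSED as above */
    struct foo_chan { enum dma_status status; /* ... */ };

    static int foo_pause(struct foo_chan *c)
    {
            if (c->status != DMA_IN_PROGRESS)
                    return 0;       /* not running: nothing to do */
            c->status = DMA_PAUSED;
            /* stop the physical channel, or dequeue the pending vchan */
            return 0;
    }

    static int foo_resume(struct foo_chan *c)
    {
            if (c->status != DMA_PAUSED)
                    return 0;       /* not paused: nothing to do */
            c->status = DMA_IN_PROGRESS;
            /* restart the physical channel, or requeue the vchan */
            return 0;
    }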
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c index 20a6f6f2a018..749f26ecd3b3 100644 --- a/drivers/dma/sh/rcar-hpbdma.c +++ b/drivers/dma/sh/rcar-hpbdma.c | |||
@@ -534,6 +534,8 @@ static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id) | |||
534 | 534 | ||
535 | static int hpb_dmae_probe(struct platform_device *pdev) | 535 | static int hpb_dmae_probe(struct platform_device *pdev) |
536 | { | 536 | { |
537 | const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE | | ||
538 | DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
537 | struct hpb_dmae_pdata *pdata = pdev->dev.platform_data; | 539 | struct hpb_dmae_pdata *pdata = pdev->dev.platform_data; |
538 | struct hpb_dmae_device *hpbdev; | 540 | struct hpb_dmae_device *hpbdev; |
539 | struct dma_device *dma_dev; | 541 | struct dma_device *dma_dev; |
@@ -595,6 +597,10 @@ static int hpb_dmae_probe(struct platform_device *pdev) | |||
595 | 597 | ||
596 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); | 598 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); |
597 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); | 599 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); |
600 | dma_dev->src_addr_widths = widths; | ||
601 | dma_dev->dst_addr_widths = widths; | ||
602 | dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); | ||
603 | dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; | ||
598 | 604 | ||
599 | hpbdev->shdma_dev.ops = &hpb_dmae_ops; | 605 | hpbdev->shdma_dev.ops = &hpb_dmae_ops; |
600 | hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc); | 606 | hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc); |
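One detail worth flagging when filling src_addr_widths/dst_addr_widths: these fields are bitmasks, and the other drivers in this series build them with BIT() of the enum dma_slave_buswidth value (see OMAP_DMA_BUSWIDTHS and PL330_DMA_BUSWIDTHS above); OR-ing the enum values directly, as the hpbdma hunk does, yields a different mask for most widths. The conventional form, for comparison:

    /* Conventional buswidth mask, matching the BIT()-based masks used
     * by the other drivers in this series (name hypothetical) */
    #define FOO_DMA_BUSWIDTHS \
            (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
             BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
             BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))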
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 3a2adb131d46..8ee383d339a5 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c | |||
@@ -729,57 +729,50 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( | |||
729 | return desc; | 729 | return desc; |
730 | } | 730 | } |
731 | 731 | ||
732 | static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 732 | static int shdma_terminate_all(struct dma_chan *chan) |
733 | unsigned long arg) | ||
734 | { | 733 | { |
735 | struct shdma_chan *schan = to_shdma_chan(chan); | 734 | struct shdma_chan *schan = to_shdma_chan(chan); |
736 | struct shdma_dev *sdev = to_shdma_dev(chan->device); | 735 | struct shdma_dev *sdev = to_shdma_dev(chan->device); |
737 | const struct shdma_ops *ops = sdev->ops; | 736 | const struct shdma_ops *ops = sdev->ops; |
738 | struct dma_slave_config *config; | ||
739 | unsigned long flags; | 737 | unsigned long flags; |
740 | int ret; | ||
741 | 738 | ||
742 | switch (cmd) { | 739 | spin_lock_irqsave(&schan->chan_lock, flags); |
743 | case DMA_TERMINATE_ALL: | 740 | ops->halt_channel(schan); |
744 | spin_lock_irqsave(&schan->chan_lock, flags); | ||
745 | ops->halt_channel(schan); | ||
746 | 741 | ||
747 | if (ops->get_partial && !list_empty(&schan->ld_queue)) { | 742 | if (ops->get_partial && !list_empty(&schan->ld_queue)) { |
748 | /* Record partial transfer */ | 743 | /* Record partial transfer */ |
749 | struct shdma_desc *desc = list_first_entry(&schan->ld_queue, | 744 | struct shdma_desc *desc = list_first_entry(&schan->ld_queue, |
750 | struct shdma_desc, node); | 745 | struct shdma_desc, node); |
751 | desc->partial = ops->get_partial(schan, desc); | 746 | desc->partial = ops->get_partial(schan, desc); |
752 | } | 747 | } |
753 | 748 | ||
754 | spin_unlock_irqrestore(&schan->chan_lock, flags); | 749 | spin_unlock_irqrestore(&schan->chan_lock, flags); |
755 | 750 | ||
756 | shdma_chan_ld_cleanup(schan, true); | 751 | shdma_chan_ld_cleanup(schan, true); |
757 | break; | ||
758 | case DMA_SLAVE_CONFIG: | ||
759 | /* | ||
760 | * So far only .slave_id is used, but the slave drivers are | ||
761 | * encouraged to also set a transfer direction and an address. | ||
762 | */ | ||
763 | if (!arg) | ||
764 | return -EINVAL; | ||
765 | /* | ||
766 | * We could lock this, but you shouldn't be configuring the | ||
767 | * channel, while using it... | ||
768 | */ | ||
769 | config = (struct dma_slave_config *)arg; | ||
770 | ret = shdma_setup_slave(schan, config->slave_id, | ||
771 | config->direction == DMA_DEV_TO_MEM ? | ||
772 | config->src_addr : config->dst_addr); | ||
773 | if (ret < 0) | ||
774 | return ret; | ||
775 | break; | ||
776 | default: | ||
777 | return -ENXIO; | ||
778 | } | ||
779 | 752 | ||
780 | return 0; | 753 | return 0; |
781 | } | 754 | } |
782 | 755 | ||
756 | static int shdma_config(struct dma_chan *chan, | ||
757 | struct dma_slave_config *config) | ||
758 | { | ||
759 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
760 | |||
761 | /* | ||
762 | * So far only .slave_id is used, but the slave drivers are | ||
763 | * encouraged to also set a transfer direction and an address. | ||
764 | */ | ||
765 | if (!config) | ||
766 | return -EINVAL; | ||
767 | /* | ||
768 | * We could lock this, but you shouldn't be configuring the | ||
769 | * channel while using it... | ||
770 | */ | ||
771 | return shdma_setup_slave(schan, config->slave_id, | ||
772 | config->direction == DMA_DEV_TO_MEM ? | ||
773 | config->src_addr : config->dst_addr); | ||
774 | } | ||
775 | |||
783 | static void shdma_issue_pending(struct dma_chan *chan) | 776 | static void shdma_issue_pending(struct dma_chan *chan) |
784 | { | 777 | { |
785 | struct shdma_chan *schan = to_shdma_chan(chan); | 778 | struct shdma_chan *schan = to_shdma_chan(chan); |
@@ -1002,7 +995,8 @@ int shdma_init(struct device *dev, struct shdma_dev *sdev, | |||
1002 | /* Compulsory for DMA_SLAVE fields */ | 995 | /* Compulsory for DMA_SLAVE fields */ |
1003 | dma_dev->device_prep_slave_sg = shdma_prep_slave_sg; | 996 | dma_dev->device_prep_slave_sg = shdma_prep_slave_sg; |
1004 | dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic; | 997 | dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic; |
1005 | dma_dev->device_control = shdma_control; | 998 | dma_dev->device_config = shdma_config; |
999 | dma_dev->device_terminate_all = shdma_terminate_all; | ||
1006 | 1000 | ||
1007 | dma_dev->dev = dev; | 1001 | dma_dev->dev = dev; |
1008 | 1002 | ||
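For shdma the stored configuration is minimal: shdma_config() forwards slave_id plus the direction-appropriate address to shdma_setup_slave(). A client feeding it would supply something like this (values illustrative):

    /* Sketch: what an shdma client supplies; dev_fifo is a hypothetical
     * peripheral FIFO bus address used for DEV_TO_MEM transfers */
    static int foo_setup_rx(struct dma_chan *chan, unsigned int slave_id,
                            dma_addr_t dev_fifo)
    {
            struct dma_slave_config cfg = {
                    .slave_id = slave_id,
                    .direction = DMA_DEV_TO_MEM,
                    .src_addr = dev_fifo,
            };

            return dmaengine_slave_config(chan, &cfg);
    }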
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index aec8a84784a4..ce4cd6be07cf 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c | |||
@@ -684,6 +684,10 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match); | |||
684 | 684 | ||
685 | static int sh_dmae_probe(struct platform_device *pdev) | 685 | static int sh_dmae_probe(struct platform_device *pdev) |
686 | { | 686 | { |
687 | const enum dma_slave_buswidth widths = | ||
688 | DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | | ||
689 | DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES | | ||
690 | DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES; | ||
687 | const struct sh_dmae_pdata *pdata; | 691 | const struct sh_dmae_pdata *pdata; |
688 | unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {}; | 692 | unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {}; |
689 | int chan_irq[SH_DMAE_MAX_CHANNELS]; | 693 | int chan_irq[SH_DMAE_MAX_CHANNELS]; |
@@ -746,6 +750,11 @@ static int sh_dmae_probe(struct platform_device *pdev) | |||
746 | return PTR_ERR(shdev->dmars); | 750 | return PTR_ERR(shdev->dmars); |
747 | } | 751 | } |
748 | 752 | ||
753 | dma_dev->src_addr_widths = widths; | ||
754 | dma_dev->dst_addr_widths = widths; | ||
755 | dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); | ||
756 | dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; | ||
757 | |||
749 | if (!pdata->slave_only) | 758 | if (!pdata->slave_only) |
750 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); | 759 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); |
751 | if (pdata->slave && pdata->slave_num) | 760 | if (pdata->slave && pdata->slave_num) |
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 3492a5f91d31..d0086e9f2082 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c | |||
@@ -281,9 +281,10 @@ static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd) | |||
281 | return cookie; | 281 | return cookie; |
282 | } | 282 | } |
283 | 283 | ||
284 | static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan, | 284 | static int sirfsoc_dma_slave_config(struct dma_chan *chan, |
285 | struct dma_slave_config *config) | 285 | struct dma_slave_config *config) |
286 | { | 286 | { |
287 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
287 | unsigned long flags; | 288 | unsigned long flags; |
288 | 289 | ||
289 | if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || | 290 | if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || |
@@ -297,8 +298,9 @@ static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan, | |||
297 | return 0; | 298 | return 0; |
298 | } | 299 | } |
299 | 300 | ||
300 | static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan) | 301 | static int sirfsoc_dma_terminate_all(struct dma_chan *chan) |
301 | { | 302 | { |
303 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
302 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); | 304 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); |
303 | int cid = schan->chan.chan_id; | 305 | int cid = schan->chan.chan_id; |
304 | unsigned long flags; | 306 | unsigned long flags; |
@@ -327,8 +329,9 @@ static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan) | |||
327 | return 0; | 329 | return 0; |
328 | } | 330 | } |
329 | 331 | ||
330 | static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan) | 332 | static int sirfsoc_dma_pause_chan(struct dma_chan *chan) |
331 | { | 333 | { |
334 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
332 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); | 335 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); |
333 | int cid = schan->chan.chan_id; | 336 | int cid = schan->chan.chan_id; |
334 | unsigned long flags; | 337 | unsigned long flags; |
@@ -348,8 +351,9 @@ static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan) | |||
348 | return 0; | 351 | return 0; |
349 | } | 352 | } |
350 | 353 | ||
351 | static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan) | 354 | static int sirfsoc_dma_resume_chan(struct dma_chan *chan) |
352 | { | 355 | { |
356 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
353 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); | 357 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); |
354 | int cid = schan->chan.chan_id; | 358 | int cid = schan->chan.chan_id; |
355 | unsigned long flags; | 359 | unsigned long flags; |
@@ -369,30 +373,6 @@ static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan) | |||
369 | return 0; | 373 | return 0; |
370 | } | 374 | } |
371 | 375 | ||
372 | static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
373 | unsigned long arg) | ||
374 | { | ||
375 | struct dma_slave_config *config; | ||
376 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
377 | |||
378 | switch (cmd) { | ||
379 | case DMA_PAUSE: | ||
380 | return sirfsoc_dma_pause_chan(schan); | ||
381 | case DMA_RESUME: | ||
382 | return sirfsoc_dma_resume_chan(schan); | ||
383 | case DMA_TERMINATE_ALL: | ||
384 | return sirfsoc_dma_terminate_all(schan); | ||
385 | case DMA_SLAVE_CONFIG: | ||
386 | config = (struct dma_slave_config *)arg; | ||
387 | return sirfsoc_dma_slave_config(schan, config); | ||
388 | |||
389 | default: | ||
390 | break; | ||
391 | } | ||
392 | |||
393 | return -ENOSYS; | ||
394 | } | ||
395 | |||
396 | /* Alloc channel resources */ | 376 | /* Alloc channel resources */ |
397 | static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan) | 377 | static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan) |
398 | { | 378 | { |
@@ -648,18 +628,6 @@ EXPORT_SYMBOL(sirfsoc_dma_filter_id); | |||
648 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ | 628 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ |
649 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) | 629 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) |
650 | 630 | ||
651 | static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan, | ||
652 | struct dma_slave_caps *caps) | ||
653 | { | ||
654 | caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS; | ||
655 | caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS; | ||
656 | caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
657 | caps->cmd_pause = true; | ||
658 | caps->cmd_terminate = true; | ||
659 | |||
660 | return 0; | ||
661 | } | ||
662 | |||
663 | static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec, | 631 | static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec, |
664 | struct of_dma *ofdma) | 632 | struct of_dma *ofdma) |
665 | { | 633 | { |
@@ -739,11 +707,16 @@ static int sirfsoc_dma_probe(struct platform_device *op) | |||
739 | dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources; | 707 | dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources; |
740 | dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources; | 708 | dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources; |
741 | dma->device_issue_pending = sirfsoc_dma_issue_pending; | 709 | dma->device_issue_pending = sirfsoc_dma_issue_pending; |
742 | dma->device_control = sirfsoc_dma_control; | 710 | dma->device_config = sirfsoc_dma_slave_config; |
711 | dma->device_pause = sirfsoc_dma_pause_chan; | ||
712 | dma->device_resume = sirfsoc_dma_resume_chan; | ||
713 | dma->device_terminate_all = sirfsoc_dma_terminate_all; | ||
743 | dma->device_tx_status = sirfsoc_dma_tx_status; | 714 | dma->device_tx_status = sirfsoc_dma_tx_status; |
744 | dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved; | 715 | dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved; |
745 | dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic; | 716 | dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic; |
746 | dma->device_slave_caps = sirfsoc_dma_device_slave_caps; | 717 | dma->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS; |
718 | dma->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS; | ||
719 | dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
747 | 720 | ||
748 | INIT_LIST_HEAD(&dma->channels); | 721 | INIT_LIST_HEAD(&dma->channels); |
749 | dma_cap_set(DMA_SLAVE, dma->cap_mask); | 722 | dma_cap_set(DMA_SLAVE, dma->cap_mask); |
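With dedicated callbacks, the framework-side wrappers can dispatch directly and return -ENOSYS themselves when a driver leaves a hook unset, which is why the per-driver default: cases (like sirfsoc's -ENOSYS above) disappear. The wrapper shape is roughly (a sketch, not necessarily the exact in-tree helper):

    /* Sketch of the wrapper shape after this series */
    static inline int dmaengine_pause(struct dma_chan *chan)
    {
            if (chan->device->device_pause)
                    return chan->device->device_pause(chan);

            return -ENOSYS;
    }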
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 15d49461c0d2..68aca3334a17 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -1429,11 +1429,17 @@ static bool d40_tx_is_linked(struct d40_chan *d40c) | |||
1429 | return is_link; | 1429 | return is_link; |
1430 | } | 1430 | } |
1431 | 1431 | ||
1432 | static int d40_pause(struct d40_chan *d40c) | 1432 | static int d40_pause(struct dma_chan *chan) |
1433 | { | 1433 | { |
1434 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | ||
1434 | int res = 0; | 1435 | int res = 0; |
1435 | unsigned long flags; | 1436 | unsigned long flags; |
1436 | 1437 | ||
1438 | if (d40c->phy_chan == NULL) { | ||
1439 | chan_err(d40c, "Channel is not allocated!\n"); | ||
1440 | return -EINVAL; | ||
1441 | } | ||
1442 | |||
1437 | if (!d40c->busy) | 1443 | if (!d40c->busy) |
1438 | return 0; | 1444 | return 0; |
1439 | 1445 | ||
@@ -1448,11 +1454,17 @@ static int d40_pause(struct d40_chan *d40c) | |||
1448 | return res; | 1454 | return res; |
1449 | } | 1455 | } |
1450 | 1456 | ||
1451 | static int d40_resume(struct d40_chan *d40c) | 1457 | static int d40_resume(struct dma_chan *chan) |
1452 | { | 1458 | { |
1459 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | ||
1453 | int res = 0; | 1460 | int res = 0; |
1454 | unsigned long flags; | 1461 | unsigned long flags; |
1455 | 1462 | ||
1463 | if (d40c->phy_chan == NULL) { | ||
1464 | chan_err(d40c, "Channel is not allocated!\n"); | ||
1465 | return -EINVAL; | ||
1466 | } | ||
1467 | |||
1456 | if (!d40c->busy) | 1468 | if (!d40c->busy) |
1457 | return 0; | 1469 | return 0; |
1458 | 1470 | ||
@@ -2604,12 +2616,17 @@ static void d40_issue_pending(struct dma_chan *chan) | |||
2604 | spin_unlock_irqrestore(&d40c->lock, flags); | 2616 | spin_unlock_irqrestore(&d40c->lock, flags); |
2605 | } | 2617 | } |
2606 | 2618 | ||
2607 | static void d40_terminate_all(struct dma_chan *chan) | 2619 | static int d40_terminate_all(struct dma_chan *chan) |
2608 | { | 2620 | { |
2609 | unsigned long flags; | 2621 | unsigned long flags; |
2610 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 2622 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2611 | int ret; | 2623 | int ret; |
2612 | 2624 | ||
2625 | if (d40c->phy_chan == NULL) { | ||
2626 | chan_err(d40c, "Channel is not allocated!\n"); | ||
2627 | return -EINVAL; | ||
2628 | } | ||
2629 | |||
2613 | spin_lock_irqsave(&d40c->lock, flags); | 2630 | spin_lock_irqsave(&d40c->lock, flags); |
2614 | 2631 | ||
2615 | pm_runtime_get_sync(d40c->base->dev); | 2632 | pm_runtime_get_sync(d40c->base->dev); |
@@ -2627,6 +2644,7 @@ static void d40_terminate_all(struct dma_chan *chan) | |||
2627 | d40c->busy = false; | 2644 | d40c->busy = false; |
2628 | 2645 | ||
2629 | spin_unlock_irqrestore(&d40c->lock, flags); | 2646 | spin_unlock_irqrestore(&d40c->lock, flags); |
2647 | return 0; | ||
2630 | } | 2648 | } |
2631 | 2649 | ||
2632 | static int | 2650 | static int |
@@ -2673,6 +2691,11 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
2673 | u32 src_maxburst, dst_maxburst; | 2691 | u32 src_maxburst, dst_maxburst; |
2674 | int ret; | 2692 | int ret; |
2675 | 2693 | ||
2694 | if (d40c->phy_chan == NULL) { | ||
2695 | chan_err(d40c, "Channel is not allocated!\n"); | ||
2696 | return -EINVAL; | ||
2697 | } | ||
2698 | |||
2676 | src_addr_width = config->src_addr_width; | 2699 | src_addr_width = config->src_addr_width; |
2677 | src_maxburst = config->src_maxburst; | 2700 | src_maxburst = config->src_maxburst; |
2678 | dst_addr_width = config->dst_addr_width; | 2701 | dst_addr_width = config->dst_addr_width; |
@@ -2781,35 +2804,6 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
2781 | return 0; | 2804 | return 0; |
2782 | } | 2805 | } |
2783 | 2806 | ||
2784 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
2785 | unsigned long arg) | ||
2786 | { | ||
2787 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | ||
2788 | |||
2789 | if (d40c->phy_chan == NULL) { | ||
2790 | chan_err(d40c, "Channel is not allocated!\n"); | ||
2791 | return -EINVAL; | ||
2792 | } | ||
2793 | |||
2794 | switch (cmd) { | ||
2795 | case DMA_TERMINATE_ALL: | ||
2796 | d40_terminate_all(chan); | ||
2797 | return 0; | ||
2798 | case DMA_PAUSE: | ||
2799 | return d40_pause(d40c); | ||
2800 | case DMA_RESUME: | ||
2801 | return d40_resume(d40c); | ||
2802 | case DMA_SLAVE_CONFIG: | ||
2803 | return d40_set_runtime_config(chan, | ||
2804 | (struct dma_slave_config *) arg); | ||
2805 | default: | ||
2806 | break; | ||
2807 | } | ||
2808 | |||
2809 | /* Other commands are unimplemented */ | ||
2810 | return -ENXIO; | ||
2811 | } | ||
2812 | |||
2813 | /* Initialization functions */ | 2807 | /* Initialization functions */ |
2814 | 2808 | ||
2815 | static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | 2809 | static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, |
@@ -2870,7 +2864,10 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev) | |||
2870 | dev->device_free_chan_resources = d40_free_chan_resources; | 2864 | dev->device_free_chan_resources = d40_free_chan_resources; |
2871 | dev->device_issue_pending = d40_issue_pending; | 2865 | dev->device_issue_pending = d40_issue_pending; |
2872 | dev->device_tx_status = d40_tx_status; | 2866 | dev->device_tx_status = d40_tx_status; |
2873 | dev->device_control = d40_control; | 2867 | dev->device_config = d40_set_runtime_config; |
2868 | dev->device_pause = d40_pause; | ||
2869 | dev->device_resume = d40_resume; | ||
2870 | dev->device_terminate_all = d40_terminate_all; | ||
2874 | dev->dev = base->dev; | 2871 | dev->dev = base->dev; |
2875 | } | 2872 | } |
2876 | 2873 | ||
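Because the new callbacks take the generic struct dma_chan * rather than a driver-private type, ste_dma40's d40_pause() and d40_resume() now open with a container_of() and repeat the allocation check that previously lived once in d40_control(). A hedged sketch of that shape, with a hypothetical foo_chan standing in for the driver's channel type:

    #include <linux/dmaengine.h>
    #include <linux/kernel.h>
    #include <linux/spinlock.h>

    /* Hypothetical driver channel embedding the generic struct dma_chan. */
    struct foo_chan {
            struct dma_chan chan;
            spinlock_t lock;
            bool allocated;
            bool busy;
    };

    static int foo_dma_pause(struct dma_chan *chan)
    {
            /* The generic handle comes in; container_of() recovers the
             * driver's private channel, as d40_pause() now does. */
            struct foo_chan *fc = container_of(chan, struct foo_chan, chan);
            unsigned long flags;

            /* Validation that used to sit once in d40_control() is
             * repeated in each dedicated callback. */
            if (!fc->allocated)
                    return -EINVAL;

            spin_lock_irqsave(&fc->lock, flags);
            if (fc->busy) {
                    /* program the controller's pause bit here */
            }
            spin_unlock_irqrestore(&fc->lock, flags);

            return 0;
    }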
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 159f1736a16f..7ebcf9bec698 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c | |||
@@ -355,38 +355,6 @@ static void sun6i_dma_free_desc(struct virt_dma_desc *vd) | |||
355 | kfree(txd); | 355 | kfree(txd); |
356 | } | 356 | } |
357 | 357 | ||
358 | static int sun6i_dma_terminate_all(struct sun6i_vchan *vchan) | ||
359 | { | ||
360 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device); | ||
361 | struct sun6i_pchan *pchan = vchan->phy; | ||
362 | unsigned long flags; | ||
363 | LIST_HEAD(head); | ||
364 | |||
365 | spin_lock(&sdev->lock); | ||
366 | list_del_init(&vchan->node); | ||
367 | spin_unlock(&sdev->lock); | ||
368 | |||
369 | spin_lock_irqsave(&vchan->vc.lock, flags); | ||
370 | |||
371 | vchan_get_all_descriptors(&vchan->vc, &head); | ||
372 | |||
373 | if (pchan) { | ||
374 | writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE); | ||
375 | writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE); | ||
376 | |||
377 | vchan->phy = NULL; | ||
378 | pchan->vchan = NULL; | ||
379 | pchan->desc = NULL; | ||
380 | pchan->done = NULL; | ||
381 | } | ||
382 | |||
383 | spin_unlock_irqrestore(&vchan->vc.lock, flags); | ||
384 | |||
385 | vchan_dma_desc_free_list(&vchan->vc, &head); | ||
386 | |||
387 | return 0; | ||
388 | } | ||
389 | |||
390 | static int sun6i_dma_start_desc(struct sun6i_vchan *vchan) | 358 | static int sun6i_dma_start_desc(struct sun6i_vchan *vchan) |
391 | { | 359 | { |
392 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device); | 360 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device); |
@@ -675,57 +643,92 @@ err_lli_free: | |||
675 | return NULL; | 643 | return NULL; |
676 | } | 644 | } |
677 | 645 | ||
678 | static int sun6i_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 646 | static int sun6i_dma_config(struct dma_chan *chan, |
679 | unsigned long arg) | 647 | struct dma_slave_config *config) |
648 | { | ||
649 | struct sun6i_vchan *vchan = to_sun6i_vchan(chan); | ||
650 | |||
651 | memcpy(&vchan->cfg, config, sizeof(*config)); | ||
652 | |||
653 | return 0; | ||
654 | } | ||
655 | |||
656 | static int sun6i_dma_pause(struct dma_chan *chan) | ||
657 | { | ||
658 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); | ||
659 | struct sun6i_vchan *vchan = to_sun6i_vchan(chan); | ||
660 | struct sun6i_pchan *pchan = vchan->phy; | ||
661 | |||
662 | dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc); | ||
663 | |||
664 | if (pchan) { | ||
665 | writel(DMA_CHAN_PAUSE_PAUSE, | ||
666 | pchan->base + DMA_CHAN_PAUSE); | ||
667 | } else { | ||
668 | spin_lock(&sdev->lock); | ||
669 | list_del_init(&vchan->node); | ||
670 | spin_unlock(&sdev->lock); | ||
671 | } | ||
672 | |||
673 | return 0; | ||
674 | } | ||
675 | |||
676 | static int sun6i_dma_resume(struct dma_chan *chan) | ||
680 | { | 677 | { |
681 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); | 678 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); |
682 | struct sun6i_vchan *vchan = to_sun6i_vchan(chan); | 679 | struct sun6i_vchan *vchan = to_sun6i_vchan(chan); |
683 | struct sun6i_pchan *pchan = vchan->phy; | 680 | struct sun6i_pchan *pchan = vchan->phy; |
684 | unsigned long flags; | 681 | unsigned long flags; |
685 | int ret = 0; | ||
686 | 682 | ||
687 | switch (cmd) { | 683 | dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc); |
688 | case DMA_RESUME: | ||
689 | dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc); | ||
690 | 684 | ||
691 | spin_lock_irqsave(&vchan->vc.lock, flags); | 685 | spin_lock_irqsave(&vchan->vc.lock, flags); |
692 | 686 | ||
693 | if (pchan) { | 687 | if (pchan) { |
694 | writel(DMA_CHAN_PAUSE_RESUME, | 688 | writel(DMA_CHAN_PAUSE_RESUME, |
695 | pchan->base + DMA_CHAN_PAUSE); | 689 | pchan->base + DMA_CHAN_PAUSE); |
696 | } else if (!list_empty(&vchan->vc.desc_issued)) { | 690 | } else if (!list_empty(&vchan->vc.desc_issued)) { |
697 | spin_lock(&sdev->lock); | 691 | spin_lock(&sdev->lock); |
698 | list_add_tail(&vchan->node, &sdev->pending); | 692 | list_add_tail(&vchan->node, &sdev->pending); |
699 | spin_unlock(&sdev->lock); | 693 | spin_unlock(&sdev->lock); |
700 | } | 694 | } |
701 | 695 | ||
702 | spin_unlock_irqrestore(&vchan->vc.lock, flags); | 696 | spin_unlock_irqrestore(&vchan->vc.lock, flags); |
703 | break; | ||
704 | 697 | ||
705 | case DMA_PAUSE: | 698 | return 0; |
706 | dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc); | 699 | } |
707 | 700 | ||
708 | if (pchan) { | 701 | static int sun6i_dma_terminate_all(struct dma_chan *chan) |
709 | writel(DMA_CHAN_PAUSE_PAUSE, | 702 | { |
710 | pchan->base + DMA_CHAN_PAUSE); | 703 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); |
711 | } else { | 704 | struct sun6i_vchan *vchan = to_sun6i_vchan(chan); |
712 | spin_lock(&sdev->lock); | 705 | struct sun6i_pchan *pchan = vchan->phy; |
713 | list_del_init(&vchan->node); | 706 | unsigned long flags; |
714 | spin_unlock(&sdev->lock); | 707 | LIST_HEAD(head); |
715 | } | 708 | |
716 | break; | 709 | spin_lock(&sdev->lock); |
717 | 710 | list_del_init(&vchan->node); | |
718 | case DMA_TERMINATE_ALL: | 711 | spin_unlock(&sdev->lock); |
719 | ret = sun6i_dma_terminate_all(vchan); | 712 | |
720 | break; | 713 | spin_lock_irqsave(&vchan->vc.lock, flags); |
721 | case DMA_SLAVE_CONFIG: | 714 | |
722 | memcpy(&vchan->cfg, (void *)arg, sizeof(struct dma_slave_config)); | 715 | vchan_get_all_descriptors(&vchan->vc, &head); |
723 | break; | 716 | |
724 | default: | 717 | if (pchan) { |
725 | ret = -ENXIO; | 718 | writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE); |
726 | break; | 719 | writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE); |
720 | |||
721 | vchan->phy = NULL; | ||
722 | pchan->vchan = NULL; | ||
723 | pchan->desc = NULL; | ||
724 | pchan->done = NULL; | ||
727 | } | 725 | } |
728 | return ret; | 726 | |
727 | spin_unlock_irqrestore(&vchan->vc.lock, flags); | ||
728 | |||
729 | vchan_dma_desc_free_list(&vchan->vc, &head); | ||
730 | |||
731 | return 0; | ||
729 | } | 732 | } |
730 | 733 | ||
731 | static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan, | 734 | static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan, |
@@ -960,9 +963,20 @@ static int sun6i_dma_probe(struct platform_device *pdev) | |||
960 | sdc->slave.device_issue_pending = sun6i_dma_issue_pending; | 963 | sdc->slave.device_issue_pending = sun6i_dma_issue_pending; |
961 | sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg; | 964 | sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg; |
962 | sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; | 965 | sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; |
963 | sdc->slave.device_control = sun6i_dma_control; | ||
964 | sdc->slave.copy_align = 4; | 966 | sdc->slave.copy_align = 4; |
965 | 967 | sdc->slave.device_config = sun6i_dma_config; | |
968 | sdc->slave.device_pause = sun6i_dma_pause; | ||
969 | sdc->slave.device_resume = sun6i_dma_resume; | ||
970 | sdc->slave.device_terminate_all = sun6i_dma_terminate_all; | ||
971 | sdc->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
972 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
973 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | ||
974 | sdc->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
975 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
976 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | ||
977 | sdc->slave.directions = BIT(DMA_DEV_TO_MEM) | | ||
978 | BIT(DMA_MEM_TO_DEV); | ||
979 | sdc->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
966 | sdc->slave.dev = &pdev->dev; | 980 | sdc->slave.dev = &pdev->dev; |
967 | 981 | ||
968 | sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels, | 982 | sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels, |
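sun6i_dma_config() above illustrates the simplest possible device_config implementation: it only caches the dma_slave_config, and the cached values are consulted when later transfers are prepared. A sketch of the same cache-and-apply-later shape, with a hypothetical foo_vchan (sun6i keeps the equivalent field as vchan->cfg):

    #include <linux/dmaengine.h>
    #include <linux/kernel.h>
    #include <linux/string.h>

    struct foo_vchan {
            struct dma_chan chan;
            struct dma_slave_config cfg;
    };

    static int foo_dma_config(struct dma_chan *chan,
                              struct dma_slave_config *config)
    {
            struct foo_vchan *vchan = container_of(chan, struct foo_vchan, chan);

            /* No hardware is touched here; the cached configuration is
             * applied when the prep_* routines build new descriptors. */
            memcpy(&vchan->cfg, config, sizeof(*config));

            return 0;
    }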
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index d8450c3f35f0..eaf585e8286b 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c | |||
@@ -723,7 +723,7 @@ end: | |||
723 | return; | 723 | return; |
724 | } | 724 | } |
725 | 725 | ||
726 | static void tegra_dma_terminate_all(struct dma_chan *dc) | 726 | static int tegra_dma_terminate_all(struct dma_chan *dc) |
727 | { | 727 | { |
728 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | 728 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); |
729 | struct tegra_dma_sg_req *sgreq; | 729 | struct tegra_dma_sg_req *sgreq; |
@@ -736,7 +736,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc) | |||
736 | spin_lock_irqsave(&tdc->lock, flags); | 736 | spin_lock_irqsave(&tdc->lock, flags); |
737 | if (list_empty(&tdc->pending_sg_req)) { | 737 | if (list_empty(&tdc->pending_sg_req)) { |
738 | spin_unlock_irqrestore(&tdc->lock, flags); | 738 | spin_unlock_irqrestore(&tdc->lock, flags); |
739 | return; | 739 | return 0; |
740 | } | 740 | } |
741 | 741 | ||
742 | if (!tdc->busy) | 742 | if (!tdc->busy) |
@@ -777,6 +777,7 @@ skip_dma_stop: | |||
777 | dma_desc->cb_count = 0; | 777 | dma_desc->cb_count = 0; |
778 | } | 778 | } |
779 | spin_unlock_irqrestore(&tdc->lock, flags); | 779 | spin_unlock_irqrestore(&tdc->lock, flags); |
780 | return 0; | ||
780 | } | 781 | } |
781 | 782 | ||
782 | static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, | 783 | static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, |
@@ -827,25 +828,6 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, | |||
827 | return ret; | 828 | return ret; |
828 | } | 829 | } |
829 | 830 | ||
830 | static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd, | ||
831 | unsigned long arg) | ||
832 | { | ||
833 | switch (cmd) { | ||
834 | case DMA_SLAVE_CONFIG: | ||
835 | return tegra_dma_slave_config(dc, | ||
836 | (struct dma_slave_config *)arg); | ||
837 | |||
838 | case DMA_TERMINATE_ALL: | ||
839 | tegra_dma_terminate_all(dc); | ||
840 | return 0; | ||
841 | |||
842 | default: | ||
843 | break; | ||
844 | } | ||
845 | |||
846 | return -ENXIO; | ||
847 | } | ||
848 | |||
849 | static inline int get_bus_width(struct tegra_dma_channel *tdc, | 831 | static inline int get_bus_width(struct tegra_dma_channel *tdc, |
850 | enum dma_slave_buswidth slave_bw) | 832 | enum dma_slave_buswidth slave_bw) |
851 | { | 833 | { |
@@ -1443,7 +1425,23 @@ static int tegra_dma_probe(struct platform_device *pdev) | |||
1443 | tegra_dma_free_chan_resources; | 1425 | tegra_dma_free_chan_resources; |
1444 | tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg; | 1426 | tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg; |
1445 | tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic; | 1427 | tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic; |
1446 | tdma->dma_dev.device_control = tegra_dma_device_control; | 1428 | tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | |
1429 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1430 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | | ||
1431 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES); | ||
1432 | tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1433 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1434 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | | ||
1435 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES); | ||
1436 | tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
1437 | /* | ||
1438 | * XXX The hardware appears to support | ||
1439 | * DMA_RESIDUE_GRANULARITY_BURST-level reporting, but it's | ||
1440 | * only used by this driver during tegra_dma_terminate_all() | ||
1441 | */ | ||
1442 | tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; | ||
1443 | tdma->dma_dev.device_config = tegra_dma_slave_config; | ||
1444 | tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all; | ||
1447 | tdma->dma_dev.device_tx_status = tegra_dma_tx_status; | 1445 | tdma->dma_dev.device_tx_status = tegra_dma_tx_status; |
1448 | tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending; | 1446 | tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending; |
1449 | 1447 | ||
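The tegra hunk advertises DMA_RESIDUE_GRANULARITY_SEGMENT even though its XXX comment suggests the hardware could report at burst granularity; what a client can rely on is whatever granularity the device declares. A hedged client-side sketch, assuming struct dma_slave_caps also carries a residue_granularity field in this tree (the hunks shown here cut off before it) and that chan was requested elsewhere:

    #include <linux/dmaengine.h>

    /* Decide whether the residue reported through device_tx_status()
     * is finer than whole descriptors, e.g. for audio pointer queries. */
    static bool foo_residue_usable(struct dma_chan *chan)
    {
            struct dma_slave_caps caps;

            if (dma_get_slave_caps(chan, &caps))
                    return false;

            return caps.residue_granularity != DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
    }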
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 2407ccf1a64b..c4c3d93fdd1b 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c | |||
@@ -561,8 +561,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, | |||
561 | return &td_desc->txd; | 561 | return &td_desc->txd; |
562 | } | 562 | } |
563 | 563 | ||
564 | static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 564 | static int td_terminate_all(struct dma_chan *chan) |
565 | unsigned long arg) | ||
566 | { | 565 | { |
567 | struct timb_dma_chan *td_chan = | 566 | struct timb_dma_chan *td_chan = |
568 | container_of(chan, struct timb_dma_chan, chan); | 567 | container_of(chan, struct timb_dma_chan, chan); |
@@ -570,9 +569,6 @@ static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
570 | 569 | ||
571 | dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); | 570 | dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); |
572 | 571 | ||
573 | if (cmd != DMA_TERMINATE_ALL) | ||
574 | return -ENXIO; | ||
575 | |||
576 | /* first the easy part, put the queue into the free list */ | 572 | /* first the easy part, put the queue into the free list */ |
577 | spin_lock_bh(&td_chan->lock); | 573 | spin_lock_bh(&td_chan->lock); |
578 | list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue, | 574 | list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue, |
@@ -697,7 +693,7 @@ static int td_probe(struct platform_device *pdev) | |||
697 | dma_cap_set(DMA_SLAVE, td->dma.cap_mask); | 693 | dma_cap_set(DMA_SLAVE, td->dma.cap_mask); |
698 | dma_cap_set(DMA_PRIVATE, td->dma.cap_mask); | 694 | dma_cap_set(DMA_PRIVATE, td->dma.cap_mask); |
699 | td->dma.device_prep_slave_sg = td_prep_slave_sg; | 695 | td->dma.device_prep_slave_sg = td_prep_slave_sg; |
700 | td->dma.device_control = td_control; | 696 | td->dma.device_terminate_all = td_terminate_all; |
701 | 697 | ||
702 | td->dma.dev = &pdev->dev; | 698 | td->dma.dev = &pdev->dev; |
703 | 699 | ||
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 0659ec9c4488..8849318b32b7 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c | |||
@@ -901,17 +901,12 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
901 | return &first->txd; | 901 | return &first->txd; |
902 | } | 902 | } |
903 | 903 | ||
904 | static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 904 | static int txx9dmac_terminate_all(struct dma_chan *chan) |
905 | unsigned long arg) | ||
906 | { | 905 | { |
907 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); | 906 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); |
908 | struct txx9dmac_desc *desc, *_desc; | 907 | struct txx9dmac_desc *desc, *_desc; |
909 | LIST_HEAD(list); | 908 | LIST_HEAD(list); |
910 | 909 | ||
911 | /* Only supports DMA_TERMINATE_ALL */ | ||
912 | if (cmd != DMA_TERMINATE_ALL) | ||
913 | return -EINVAL; | ||
914 | |||
915 | dev_vdbg(chan2dev(chan), "terminate_all\n"); | 910 | dev_vdbg(chan2dev(chan), "terminate_all\n"); |
916 | spin_lock_bh(&dc->lock); | 911 | spin_lock_bh(&dc->lock); |
917 | 912 | ||
@@ -1109,7 +1104,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev) | |||
1109 | dc->dma.dev = &pdev->dev; | 1104 | dc->dma.dev = &pdev->dev; |
1110 | dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources; | 1105 | dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources; |
1111 | dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources; | 1106 | dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources; |
1112 | dc->dma.device_control = txx9dmac_control; | 1107 | dc->dma.device_terminate_all = txx9dmac_terminate_all; |
1113 | dc->dma.device_tx_status = txx9dmac_tx_status; | 1108 | dc->dma.device_tx_status = txx9dmac_tx_status; |
1114 | dc->dma.device_issue_pending = txx9dmac_issue_pending; | 1109 | dc->dma.device_issue_pending = txx9dmac_issue_pending; |
1115 | if (pdata && pdata->memcpy_chan == ch) { | 1110 | if (pdata && pdata->memcpy_chan == ch) { |
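timb_dma and txx9dmac above are the degenerate case of the conversion: their device_control() only ever honored DMA_TERMINATE_ALL, so the command guard and its -ENXIO/-EINVAL return vanish outright, leaving a plain terminate callback. A sketch of that shape with hypothetical foo_* names; note the callback returns int, so the formerly void terminate paths (tegra above, xilinx below) now report 0 on success:

    #include <linux/dmaengine.h>
    #include <linux/kernel.h>
    #include <linux/spinlock.h>

    struct foo_chan {
            struct dma_chan chan;
            spinlock_t lock;
    };

    static int foo_terminate_all(struct dma_chan *chan)
    {
            struct foo_chan *fc = container_of(chan, struct foo_chan, chan);

            /* No command to validate: the callback is operation-specific. */
            spin_lock_bh(&fc->lock);
            /* stop the hardware, move queued descriptors to the free list */
            spin_unlock_bh(&fc->lock);

            return 0;
    }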
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c index 4a3a8f3137b3..bdd2a5dd7220 100644 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ b/drivers/dma/xilinx/xilinx_vdma.c | |||
@@ -1001,13 +1001,17 @@ error: | |||
1001 | * xilinx_vdma_terminate_all - Halt the channel and free descriptors | 1001 | * xilinx_vdma_terminate_all - Halt the channel and free descriptors |
1002 | * @chan: Driver specific VDMA Channel pointer | 1002 | * @chan: Driver specific VDMA Channel pointer |
1003 | */ | 1003 | */ |
1004 | static void xilinx_vdma_terminate_all(struct xilinx_vdma_chan *chan) | 1004 | static int xilinx_vdma_terminate_all(struct dma_chan *dchan) |
1005 | { | 1005 | { |
1006 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | ||
1007 | |||
1006 | /* Halt the DMA engine */ | 1008 | /* Halt the DMA engine */ |
1007 | xilinx_vdma_halt(chan); | 1009 | xilinx_vdma_halt(chan); |
1008 | 1010 | ||
1009 | /* Remove and free all of the descriptors in the lists */ | 1011 | /* Remove and free all of the descriptors in the lists */ |
1010 | xilinx_vdma_free_descriptors(chan); | 1012 | xilinx_vdma_free_descriptors(chan); |
1013 | |||
1014 | return 0; | ||
1011 | } | 1015 | } |
1012 | 1016 | ||
1013 | /** | 1017 | /** |
@@ -1075,27 +1079,6 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan, | |||
1075 | } | 1079 | } |
1076 | EXPORT_SYMBOL(xilinx_vdma_channel_set_config); | 1080 | EXPORT_SYMBOL(xilinx_vdma_channel_set_config); |
1077 | 1081 | ||
1078 | /** | ||
1079 | * xilinx_vdma_device_control - Configure DMA channel of the device | ||
1080 | * @dchan: DMA Channel pointer | ||
1081 | * @cmd: DMA control command | ||
1082 | * @arg: Channel configuration | ||
1083 | * | ||
1084 | * Return: '0' on success and failure value on error | ||
1085 | */ | ||
1086 | static int xilinx_vdma_device_control(struct dma_chan *dchan, | ||
1087 | enum dma_ctrl_cmd cmd, unsigned long arg) | ||
1088 | { | ||
1089 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | ||
1090 | |||
1091 | if (cmd != DMA_TERMINATE_ALL) | ||
1092 | return -ENXIO; | ||
1093 | |||
1094 | xilinx_vdma_terminate_all(chan); | ||
1095 | |||
1096 | return 0; | ||
1097 | } | ||
1098 | |||
1099 | /* ----------------------------------------------------------------------------- | 1082 | /* ----------------------------------------------------------------------------- |
1100 | * Probe and remove | 1083 | * Probe and remove |
1101 | */ | 1084 | */ |
@@ -1300,7 +1283,7 @@ static int xilinx_vdma_probe(struct platform_device *pdev) | |||
1300 | xilinx_vdma_free_chan_resources; | 1283 | xilinx_vdma_free_chan_resources; |
1301 | xdev->common.device_prep_interleaved_dma = | 1284 | xdev->common.device_prep_interleaved_dma = |
1302 | xilinx_vdma_dma_prep_interleaved; | 1285 | xilinx_vdma_dma_prep_interleaved; |
1303 | xdev->common.device_control = xilinx_vdma_device_control; | 1286 | xdev->common.device_terminate_all = xilinx_vdma_terminate_all; |
1304 | xdev->common.device_tx_status = xilinx_vdma_tx_status; | 1287 | xdev->common.device_tx_status = xilinx_vdma_tx_status; |
1305 | xdev->common.device_issue_pending = xilinx_vdma_issue_pending; | 1288 | xdev->common.device_issue_pending = xilinx_vdma_issue_pending; |
1306 | 1289 | ||
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c index f64c5decb747..47295940a868 100644 --- a/drivers/rapidio/devices/tsi721_dma.c +++ b/drivers/rapidio/devices/tsi721_dma.c | |||
@@ -815,8 +815,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan, | |||
815 | return txd; | 815 | return txd; |
816 | } | 816 | } |
817 | 817 | ||
818 | static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, | 818 | static int tsi721_terminate_all(struct dma_chan *dchan) |
819 | unsigned long arg) | ||
820 | { | 819 | { |
821 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | 820 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); |
822 | struct tsi721_tx_desc *desc, *_d; | 821 | struct tsi721_tx_desc *desc, *_d; |
@@ -825,9 +824,6 @@ static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, | |||
825 | 824 | ||
826 | dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); | 825 | dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); |
827 | 826 | ||
828 | if (cmd != DMA_TERMINATE_ALL) | ||
829 | return -ENOSYS; | ||
830 | |||
831 | spin_lock_bh(&bdma_chan->lock); | 827 | spin_lock_bh(&bdma_chan->lock); |
832 | 828 | ||
833 | bdma_chan->active = false; | 829 | bdma_chan->active = false; |
@@ -901,7 +897,7 @@ int tsi721_register_dma(struct tsi721_device *priv) | |||
901 | mport->dma.device_tx_status = tsi721_tx_status; | 897 | mport->dma.device_tx_status = tsi721_tx_status; |
902 | mport->dma.device_issue_pending = tsi721_issue_pending; | 898 | mport->dma.device_issue_pending = tsi721_issue_pending; |
903 | mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg; | 899 | mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg; |
904 | mport->dma.device_control = tsi721_device_control; | 900 | mport->dma.device_terminate_all = tsi721_terminate_all; |
905 | 901 | ||
906 | err = dma_async_device_register(&mport->dma); | 902 | err = dma_async_device_register(&mport->dma); |
907 | if (err) | 903 | if (err) |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 40cd75e21ea2..50745e3a8a3f 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -189,25 +189,6 @@ enum dma_ctrl_flags { | |||
189 | }; | 189 | }; |
190 | 190 | ||
191 | /** | 191 | /** |
192 | * enum dma_ctrl_cmd - DMA operations that can optionally be exercised | ||
193 | * on a running channel. | ||
194 | * @DMA_TERMINATE_ALL: terminate all ongoing transfers | ||
195 | * @DMA_PAUSE: pause ongoing transfers | ||
196 | * @DMA_RESUME: resume paused transfer | ||
197 | * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers | ||
198 | * that need to runtime reconfigure the slave channels (as opposed to passing | ||
199 | * configuration data in statically from the platform). An additional | ||
200 | * argument of struct dma_slave_config must be passed in with this | ||
201 | * command. | ||
202 | */ | ||
203 | enum dma_ctrl_cmd { | ||
204 | DMA_TERMINATE_ALL, | ||
205 | DMA_PAUSE, | ||
206 | DMA_RESUME, | ||
207 | DMA_SLAVE_CONFIG, | ||
208 | }; | ||
209 | |||
210 | /** | ||
211 | * enum sum_check_bits - bit position of pq_check_flags | 192 | * enum sum_check_bits - bit position of pq_check_flags |
212 | */ | 193 | */ |
213 | enum sum_check_bits { | 194 | enum sum_check_bits { |
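With enum dma_ctrl_cmd removed above, each retired command value maps one-to-one onto a dedicated inline helper (the updated inlines appear further down in this header diff), and every helper returns -ENOSYS when the provider lacks the matching callback; DMA_TERMINATE_ALL correspondingly becomes dmaengine_terminate_all(). A sketch of a client exercising the mapping, with a hypothetical foo_reconfigure():

    #include <linux/dmaengine.h>

    static int foo_reconfigure(struct dma_chan *chan,
                               struct dma_slave_config *cfg)
    {
            int ret;

            ret = dmaengine_pause(chan);             /* was DMA_PAUSE */
            if (ret && ret != -ENOSYS)
                    return ret;

            ret = dmaengine_slave_config(chan, cfg); /* was DMA_SLAVE_CONFIG */
            if (ret)
                    return ret;

            return dmaengine_resume(chan);           /* was DMA_RESUME */
    }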
@@ -336,9 +317,8 @@ enum dma_slave_buswidth { | |||
336 | * This struct is passed in as configuration data to a DMA engine | 317 | * This struct is passed in as configuration data to a DMA engine |
337 | * in order to set up a certain channel for DMA transport at runtime. | 318 | * in order to set up a certain channel for DMA transport at runtime. |
338 | * The DMA device/engine has to provide support for an additional | 319 | * The DMA device/engine has to provide support for an additional |
339 | * command in the channel config interface, DMA_SLAVE_CONFIG | 320 | * callback in the dma_device structure, device_config, and this struct |
340 | * and this struct will then be passed in as an argument to the | 321 | * will then be passed in as an argument to the function. |
341 | * DMA engine device_control() function. | ||
342 | * | 322 | * |
343 | * The rationale for adding configuration information to this struct is as | 323 | * The rationale for adding configuration information to this struct is as |
344 | * follows: if it is likely that more than one DMA slave controllers in | 324 | * follows: if it is likely that more than one DMA slave controllers in |
@@ -387,7 +367,7 @@ enum dma_residue_granularity { | |||
387 | /* struct dma_slave_caps - expose capabilities of a slave channel only | 367 | /* struct dma_slave_caps - expose capabilities of a slave channel only |
388 | * | 368 | * |
389 | * @src_addr_widths: bit mask of src addr widths the channel supports | 369 | * @src_addr_widths: bit mask of src addr widths the channel supports |
390 | * @dstn_addr_widths: bit mask of dstn addr widths the channel supports | 370 | * @dst_addr_widths: bit mask of dst addr widths the channel supports |
391 | * @directions: bit mask of slave directions the channel supports | 371 | * @directions: bit mask of slave directions the channel supports |
392 | * since the enum dma_transfer_direction is not defined as bits for each | 372 | * since the enum dma_transfer_direction is not defined as bits for each |
393 | * type of direction, the dma controller should fill (1 << <TYPE>) and same | 373 | * type of direction, the dma controller should fill (1 << <TYPE>) and same |
@@ -398,7 +378,7 @@ enum dma_residue_granularity { | |||
398 | */ | 378 | */ |
399 | struct dma_slave_caps { | 379 | struct dma_slave_caps { |
400 | u32 src_addr_widths; | 380 | u32 src_addr_widths; |
401 | u32 dstn_addr_widths; | 381 | u32 dst_addr_widths; |
402 | u32 directions; | 382 | u32 directions; |
403 | bool cmd_pause; | 383 | bool cmd_pause; |
404 | bool cmd_terminate; | 384 | bool cmd_terminate; |
@@ -594,6 +574,14 @@ struct dma_tx_state { | |||
594 | * @fill_align: alignment shift for memset operations | 574 | * @fill_align: alignment shift for memset operations |
595 | * @dev_id: unique device ID | 575 | * @dev_id: unique device ID |
596 | * @dev: struct device reference for dma mapping api | 576 | * @dev: struct device reference for dma mapping api |
577 | * @src_addr_widths: bit mask of src addr widths the device supports | ||
578 | * @dst_addr_widths: bit mask of dst addr widths the device supports | ||
579 | * @directions: bit mask of slave directions the device supports; since | ||
580 | * the enum dma_transfer_direction is not defined as bits for | ||
581 | * each type of direction, the dma controller should fill (1 << | ||
582 | * <TYPE>) and the same should be checked by the controller as well | ||
583 | * @residue_granularity: granularity of the transfer residue reported | ||
584 | * by tx_status | ||
597 | * @device_alloc_chan_resources: allocate resources and return the | 585 | * @device_alloc_chan_resources: allocate resources and return the |
598 | * number of allocated descriptors | 586 | * number of allocated descriptors |
599 | * @device_free_chan_resources: release DMA channel's resources | 587 | * @device_free_chan_resources: release DMA channel's resources |
@@ -608,14 +596,19 @@ struct dma_tx_state { | |||
608 | * The function takes a buffer of size buf_len. The callback function will | 596 | * The function takes a buffer of size buf_len. The callback function will |
609 | * be called after period_len bytes have been transferred. | 597 | * be called after period_len bytes have been transferred. |
610 | * @device_prep_interleaved_dma: Transfer expression in a generic way. | 598 | * @device_prep_interleaved_dma: Transfer expression in a generic way. |
611 | * @device_control: manipulate all pending operations on a channel, returns | 599 | * @device_config: Pushes a new configuration to a channel, returns 0 or an error |
612 | * zero or error code | 600 | * code |
601 | * @device_pause: Pauses any transfer happening on a channel. Returns | ||
602 | * 0 or an error code | ||
603 | * @device_resume: Resumes any transfer on a channel previously | ||
604 | * paused. Returns 0 or an error code | ||
605 | * @device_terminate_all: Aborts all transfers on a channel. Returns 0 | ||
606 | * or an error code | ||
613 | * @device_tx_status: poll for transaction completion, the optional | 607 | * @device_tx_status: poll for transaction completion, the optional |
614 | * txstate parameter can be supplied with a pointer to get a | 608 | * txstate parameter can be supplied with a pointer to get a |
615 | * struct with auxiliary transfer status information, otherwise the call | 609 | * struct with auxiliary transfer status information, otherwise the call |
616 | * will just return a simple status code | 610 | * will just return a simple status code |
617 | * @device_issue_pending: push pending transactions to hardware | 611 | * @device_issue_pending: push pending transactions to hardware |
618 | * @device_slave_caps: return the slave channel capabilities | ||
619 | */ | 612 | */ |
620 | struct dma_device { | 613 | struct dma_device { |
621 | 614 | ||
@@ -635,14 +628,19 @@ struct dma_device { | |||
635 | int dev_id; | 628 | int dev_id; |
636 | struct device *dev; | 629 | struct device *dev; |
637 | 630 | ||
631 | u32 src_addr_widths; | ||
632 | u32 dst_addr_widths; | ||
633 | u32 directions; | ||
634 | enum dma_residue_granularity residue_granularity; | ||
635 | |||
638 | int (*device_alloc_chan_resources)(struct dma_chan *chan); | 636 | int (*device_alloc_chan_resources)(struct dma_chan *chan); |
639 | void (*device_free_chan_resources)(struct dma_chan *chan); | 637 | void (*device_free_chan_resources)(struct dma_chan *chan); |
640 | 638 | ||
641 | struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( | 639 | struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( |
642 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | 640 | struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, |
643 | size_t len, unsigned long flags); | 641 | size_t len, unsigned long flags); |
644 | struct dma_async_tx_descriptor *(*device_prep_dma_xor)( | 642 | struct dma_async_tx_descriptor *(*device_prep_dma_xor)( |
645 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | 643 | struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, |
646 | unsigned int src_cnt, size_t len, unsigned long flags); | 644 | unsigned int src_cnt, size_t len, unsigned long flags); |
647 | struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( | 645 | struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( |
648 | struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, | 646 | struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, |
@@ -674,31 +672,26 @@ struct dma_device { | |||
674 | struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( | 672 | struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( |
675 | struct dma_chan *chan, struct dma_interleaved_template *xt, | 673 | struct dma_chan *chan, struct dma_interleaved_template *xt, |
676 | unsigned long flags); | 674 | unsigned long flags); |
677 | int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 675 | |
678 | unsigned long arg); | 676 | int (*device_config)(struct dma_chan *chan, |
677 | struct dma_slave_config *config); | ||
678 | int (*device_pause)(struct dma_chan *chan); | ||
679 | int (*device_resume)(struct dma_chan *chan); | ||
680 | int (*device_terminate_all)(struct dma_chan *chan); | ||
679 | 681 | ||
680 | enum dma_status (*device_tx_status)(struct dma_chan *chan, | 682 | enum dma_status (*device_tx_status)(struct dma_chan *chan, |
681 | dma_cookie_t cookie, | 683 | dma_cookie_t cookie, |
682 | struct dma_tx_state *txstate); | 684 | struct dma_tx_state *txstate); |
683 | void (*device_issue_pending)(struct dma_chan *chan); | 685 | void (*device_issue_pending)(struct dma_chan *chan); |
684 | int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps); | ||
685 | }; | 686 | }; |
686 | 687 | ||
687 | static inline int dmaengine_device_control(struct dma_chan *chan, | ||
688 | enum dma_ctrl_cmd cmd, | ||
689 | unsigned long arg) | ||
690 | { | ||
691 | if (chan->device->device_control) | ||
692 | return chan->device->device_control(chan, cmd, arg); | ||
693 | |||
694 | return -ENOSYS; | ||
695 | } | ||
696 | |||
697 | static inline int dmaengine_slave_config(struct dma_chan *chan, | 688 | static inline int dmaengine_slave_config(struct dma_chan *chan, |
698 | struct dma_slave_config *config) | 689 | struct dma_slave_config *config) |
699 | { | 690 | { |
700 | return dmaengine_device_control(chan, DMA_SLAVE_CONFIG, | 691 | if (chan->device->device_config) |
701 | (unsigned long)config); | 692 | return chan->device->device_config(chan, config); |
693 | |||
694 | return -ENOSYS; | ||
702 | } | 695 | } |
703 | 696 | ||
704 | static inline bool is_slave_direction(enum dma_transfer_direction direction) | 697 | static inline bool is_slave_direction(enum dma_transfer_direction direction) |
@@ -765,34 +758,28 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( | |||
765 | src_sg, src_nents, flags); | 758 | src_sg, src_nents, flags); |
766 | } | 759 | } |
767 | 760 | ||
768 | static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) | ||
769 | { | ||
770 | if (!chan || !caps) | ||
771 | return -EINVAL; | ||
772 | |||
773 | /* check if the channel supports slave transactions */ | ||
774 | if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits)) | ||
775 | return -ENXIO; | ||
776 | |||
777 | if (chan->device->device_slave_caps) | ||
778 | return chan->device->device_slave_caps(chan, caps); | ||
779 | |||
780 | return -ENXIO; | ||
781 | } | ||
782 | |||
783 | static inline int dmaengine_terminate_all(struct dma_chan *chan) | 761 | static inline int dmaengine_terminate_all(struct dma_chan *chan) |
784 | { | 762 | { |
785 | return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); | 763 | if (chan->device->device_terminate_all) |
764 | return chan->device->device_terminate_all(chan); | ||
765 | |||
766 | return -ENOSYS; | ||
786 | } | 767 | } |
787 | 768 | ||
788 | static inline int dmaengine_pause(struct dma_chan *chan) | 769 | static inline int dmaengine_pause(struct dma_chan *chan) |
789 | { | 770 | { |
790 | return dmaengine_device_control(chan, DMA_PAUSE, 0); | 771 | if (chan->device->device_pause) |
772 | return chan->device->device_pause(chan); | ||
773 | |||
774 | return -ENOSYS; | ||
791 | } | 775 | } |
792 | 776 | ||
793 | static inline int dmaengine_resume(struct dma_chan *chan) | 777 | static inline int dmaengine_resume(struct dma_chan *chan) |
794 | { | 778 | { |
795 | return dmaengine_device_control(chan, DMA_RESUME, 0); | 779 | if (chan->device->device_resume) |
780 | return chan->device->device_resume(chan); | ||
781 | |||
782 | return -ENOSYS; | ||
796 | } | 783 | } |
797 | 784 | ||
798 | static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan, | 785 | static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan, |
@@ -1059,6 +1046,7 @@ struct dma_chan *dma_request_slave_channel_reason(struct device *dev, | |||
1059 | const char *name); | 1046 | const char *name); |
1060 | struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); | 1047 | struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); |
1061 | void dma_release_channel(struct dma_chan *chan); | 1048 | void dma_release_channel(struct dma_chan *chan); |
1049 | int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps); | ||
1062 | #else | 1050 | #else |
1063 | static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) | 1051 | static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) |
1064 | { | 1052 | { |
@@ -1093,6 +1081,11 @@ static inline struct dma_chan *dma_request_slave_channel(struct device *dev, | |||
1093 | static inline void dma_release_channel(struct dma_chan *chan) | 1081 | static inline void dma_release_channel(struct dma_chan *chan) |
1094 | { | 1082 | { |
1095 | } | 1083 | } |
1084 | static inline int dma_get_slave_caps(struct dma_chan *chan, | ||
1085 | struct dma_slave_caps *caps) | ||
1086 | { | ||
1087 | return -ENXIO; | ||
1088 | } | ||
1096 | #endif | 1089 | #endif |
1097 | 1090 | ||
1098 | /* --- DMA device --- */ | 1091 | /* --- DMA device --- */ |
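The header now only declares dma_get_slave_caps() (with a -ENXIO stub for !CONFIG_DMA_ENGINE); the out-of-line definition lives in dmaengine.c, which this excerpt does not show. Under that caveat, a sketch of how a generic implementation can derive the caps from the new dma_device fields, inferring pause and terminate support from callback presence:

    #include <linux/dmaengine.h>

    /* Hypothetical stand-in for the core helper; not the actual
     * dmaengine.c change. Assumes dma_slave_caps also carries
     * residue_granularity in this tree. */
    static int foo_get_slave_caps(struct dma_chan *chan,
                                  struct dma_slave_caps *caps)
    {
            struct dma_device *device = chan->device;

            caps->src_addr_widths = device->src_addr_widths;
            caps->dst_addr_widths = device->dst_addr_widths;
            caps->directions = device->directions;
            caps->residue_granularity = device->residue_granularity;

            /* Pause/terminate support falls out of callback presence. */
            caps->cmd_pause = !!device->device_pause;
            caps->cmd_terminate = !!device->device_terminate_all;

            return 0;
    }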
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c index b329b84bc5af..851f7afcd5dc 100644 --- a/sound/soc/soc-generic-dmaengine-pcm.c +++ b/sound/soc/soc-generic-dmaengine-pcm.c | |||
@@ -151,7 +151,7 @@ static int dmaengine_pcm_set_runtime_hwparams(struct snd_pcm_substream *substrea | |||
151 | hw.info |= SNDRV_PCM_INFO_BATCH; | 151 | hw.info |= SNDRV_PCM_INFO_BATCH; |
152 | 152 | ||
153 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 153 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
154 | addr_widths = dma_caps.dstn_addr_widths; | 154 | addr_widths = dma_caps.dst_addr_widths; |
155 | else | 155 | else |
156 | addr_widths = dma_caps.src_addr_widths; | 156 | addr_widths = dma_caps.src_addr_widths; |
157 | } | 157 | } |
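Finally, the soc-generic-dmaengine-pcm hunk is a pure follow-up to the dstn_addr_widths to dst_addr_widths rename: the stream direction still selects which width mask constrains the sample formats. A trivial sketch of that selection, where is_playback is a hypothetical stand-in for the substream direction check:

    #include <linux/dmaengine.h>

    static u32 foo_pcm_addr_widths(const struct dma_slave_caps *caps,
                                   bool is_playback)
    {
            /* Playback writes toward the device, so the destination
             * widths apply; capture is bounded by the source widths. */
            return is_playback ? caps->dst_addr_widths : caps->src_addr_widths;
    }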