diff options
author | Andy Shevchenko <andriy.shevchenko@linux.intel.com> | 2012-09-21 08:05:49 -0400 |
---|---|---|
committer | Vinod Koul <vinod.koul@linux.intel.com> | 2012-09-27 06:05:23 -0400 |
commit | fed2574b3c9f44556ed4f5cb17f63b15edd87d06 (patch) | |
tree | 1b080d85ea7e32778dae4d0dfce9d961bd5efee0 /drivers/dma | |
parent | a09820043c9e11149145a1ec221eed4a7b42dcce (diff) |
dw_dmac: introduce software emulation of LLP transfers
Some controllers have the reduced functionality where the LLP multi block
transfers are not supported. This patch introduces support for such
controllers. In the case of memory copy or scatter-gather lists it emulates
LLP transfers via a bunch of regular single block ones.
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/dw_dmac.c | 99 | ||||
-rw-r--r-- | drivers/dma/dw_dmac_regs.h | 6 |
2 files changed, 101 insertions, 4 deletions
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index cdc0a1fe2c64..9ca9ca41b83e 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c | |||
@@ -232,10 +232,29 @@ static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
232 | 232 | ||
233 | /*----------------------------------------------------------------------*/ | 233 | /*----------------------------------------------------------------------*/ |
234 | 234 | ||
235 | /* Perform single block transfer */ | ||
236 | static inline void dwc_do_single_block(struct dw_dma_chan *dwc, | ||
237 | struct dw_desc *desc) | ||
238 | { | ||
239 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
240 | u32 ctllo; | ||
241 | |||
242 | /* Software emulation of LLP mode relies on interrupts to continue | ||
243 | * multi block transfer. */ | ||
244 | ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN; | ||
245 | |||
246 | channel_writel(dwc, SAR, desc->lli.sar); | ||
247 | channel_writel(dwc, DAR, desc->lli.dar); | ||
248 | channel_writel(dwc, CTL_LO, ctllo); | ||
249 | channel_writel(dwc, CTL_HI, desc->lli.ctlhi); | ||
250 | channel_set_bit(dw, CH_EN, dwc->mask); | ||
251 | } | ||
252 | |||
235 | /* Called with dwc->lock held and bh disabled */ | 253 | /* Called with dwc->lock held and bh disabled */ |
236 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | 254 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) |
237 | { | 255 | { |
238 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 256 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
257 | unsigned long was_soft_llp; | ||
239 | 258 | ||
240 | /* ASSERT: channel is idle */ | 259 | /* ASSERT: channel is idle */ |
241 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 260 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
@@ -247,6 +266,26 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | |||
247 | return; | 266 | return; |
248 | } | 267 | } |
249 | 268 | ||
269 | if (dwc->nollp) { | ||
270 | was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP, | ||
271 | &dwc->flags); | ||
272 | if (was_soft_llp) { | ||
273 | dev_err(chan2dev(&dwc->chan), | ||
274 | "BUG: Attempted to start new LLP transfer " | ||
275 | "inside ongoing one\n"); | ||
276 | return; | ||
277 | } | ||
278 | |||
279 | dwc_initialize(dwc); | ||
280 | |||
281 | dwc->tx_list = &first->tx_list; | ||
282 | dwc->tx_node_active = first->tx_list.next; | ||
283 | |||
284 | dwc_do_single_block(dwc, first); | ||
285 | |||
286 | return; | ||
287 | } | ||
288 | |||
250 | dwc_initialize(dwc); | 289 | dwc_initialize(dwc); |
251 | 290 | ||
252 | channel_writel(dwc, LLP, first->txd.phys); | 291 | channel_writel(dwc, LLP, first->txd.phys); |
@@ -558,8 +597,36 @@ static void dw_dma_tasklet(unsigned long data) | |||
558 | dwc_handle_cyclic(dw, dwc, status_err, status_xfer); | 597 | dwc_handle_cyclic(dw, dwc, status_err, status_xfer); |
559 | else if (status_err & (1 << i)) | 598 | else if (status_err & (1 << i)) |
560 | dwc_handle_error(dw, dwc); | 599 | dwc_handle_error(dw, dwc); |
561 | else if (status_xfer & (1 << i)) | 600 | else if (status_xfer & (1 << i)) { |
601 | unsigned long flags; | ||
602 | |||
603 | spin_lock_irqsave(&dwc->lock, flags); | ||
604 | if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { | ||
605 | if (dwc->tx_node_active != dwc->tx_list) { | ||
606 | struct dw_desc *desc = | ||
607 | list_entry(dwc->tx_node_active, | ||
608 | struct dw_desc, | ||
609 | desc_node); | ||
610 | |||
611 | dma_writel(dw, CLEAR.XFER, dwc->mask); | ||
612 | |||
613 | /* move pointer to next descriptor */ | ||
614 | dwc->tx_node_active = | ||
615 | dwc->tx_node_active->next; | ||
616 | |||
617 | dwc_do_single_block(dwc, desc); | ||
618 | |||
619 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
620 | continue; | ||
621 | } else { | ||
622 | /* we are done here */ | ||
623 | clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); | ||
624 | } | ||
625 | } | ||
626 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
627 | |||
562 | dwc_scan_descriptors(dw, dwc); | 628 | dwc_scan_descriptors(dw, dwc); |
629 | } | ||
563 | } | 630 | } |
564 | 631 | ||
565 | /* | 632 | /* |
@@ -962,6 +1029,8 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
962 | } else if (cmd == DMA_TERMINATE_ALL) { | 1029 | } else if (cmd == DMA_TERMINATE_ALL) { |
963 | spin_lock_irqsave(&dwc->lock, flags); | 1030 | spin_lock_irqsave(&dwc->lock, flags); |
964 | 1031 | ||
1032 | clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); | ||
1033 | |||
965 | dwc_chan_disable(dw, dwc); | 1034 | dwc_chan_disable(dw, dwc); |
966 | 1035 | ||
967 | dwc->paused = false; | 1036 | dwc->paused = false; |
@@ -1204,6 +1273,13 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1204 | unsigned long flags; | 1273 | unsigned long flags; |
1205 | 1274 | ||
1206 | spin_lock_irqsave(&dwc->lock, flags); | 1275 | spin_lock_irqsave(&dwc->lock, flags); |
1276 | if (dwc->nollp) { | ||
1277 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
1278 | dev_dbg(chan2dev(&dwc->chan), | ||
1279 | "channel doesn't support LLP transfers\n"); | ||
1280 | return ERR_PTR(-EINVAL); | ||
1281 | } | ||
1282 | |||
1207 | if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { | 1283 | if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { |
1208 | spin_unlock_irqrestore(&dwc->lock, flags); | 1284 | spin_unlock_irqrestore(&dwc->lock, flags); |
1209 | dev_dbg(chan2dev(&dwc->chan), | 1285 | dev_dbg(chan2dev(&dwc->chan), |
@@ -1471,6 +1547,7 @@ static int __devinit dw_probe(struct platform_device *pdev) | |||
1471 | INIT_LIST_HEAD(&dw->dma.channels); | 1547 | INIT_LIST_HEAD(&dw->dma.channels); |
1472 | for (i = 0; i < nr_channels; i++) { | 1548 | for (i = 0; i < nr_channels; i++) { |
1473 | struct dw_dma_chan *dwc = &dw->chan[i]; | 1549 | struct dw_dma_chan *dwc = &dw->chan[i]; |
1550 | int r = nr_channels - i - 1; | ||
1474 | 1551 | ||
1475 | dwc->chan.device = &dw->dma; | 1552 | dwc->chan.device = &dw->dma; |
1476 | dma_cookie_init(&dwc->chan); | 1553 | dma_cookie_init(&dwc->chan); |
@@ -1482,7 +1559,7 @@ static int __devinit dw_probe(struct platform_device *pdev) | |||
1482 | 1559 | ||
1483 | /* 7 is highest priority & 0 is lowest. */ | 1560 | /* 7 is highest priority & 0 is lowest. */ |
1484 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) | 1561 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) |
1485 | dwc->priority = nr_channels - i - 1; | 1562 | dwc->priority = r; |
1486 | else | 1563 | else |
1487 | dwc->priority = i; | 1564 | dwc->priority = i; |
1488 | 1565 | ||
@@ -1499,14 +1576,28 @@ static int __devinit dw_probe(struct platform_device *pdev) | |||
1499 | dwc->dw = dw; | 1576 | dwc->dw = dw; |
1500 | 1577 | ||
1501 | /* hardware configuration */ | 1578 | /* hardware configuration */ |
1502 | if (autocfg) | 1579 | if (autocfg) { |
1580 | unsigned int dwc_params; | ||
1581 | |||
1582 | dwc_params = dma_read_byaddr(regs + r * sizeof(u32), | ||
1583 | DWC_PARAMS); | ||
1584 | |||
1503 | /* Decode maximum block size for given channel. The | 1585 | /* Decode maximum block size for given channel. The |
1504 | * stored 4 bit value represents blocks from 0x00 for 3 | 1586 | * stored 4 bit value represents blocks from 0x00 for 3 |
1505 | * up to 0x0a for 4095. */ | 1587 | * up to 0x0a for 4095. */ |
1506 | dwc->block_size = | 1588 | dwc->block_size = |
1507 | (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1; | 1589 | (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1; |
1508 | else | 1590 | dwc->nollp = |
1591 | (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; | ||
1592 | } else { | ||
1509 | dwc->block_size = pdata->block_size; | 1593 | dwc->block_size = pdata->block_size; |
1594 | |||
1595 | /* Check if channel supports multi block transfer */ | ||
1596 | channel_writel(dwc, LLP, 0xfffffffc); | ||
1597 | dwc->nollp = | ||
1598 | (channel_readl(dwc, LLP) & 0xfffffffc) == 0; | ||
1599 | channel_writel(dwc, LLP, 0); | ||
1600 | } | ||
1510 | } | 1601 | } |
1511 | 1602 | ||
1512 | /* Clear all interrupts on all channels. */ | 1603 | /* Clear all interrupts on all channels. */ |
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index 06f03914f022..ff39fa6cd2bc 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h | |||
@@ -172,6 +172,7 @@ struct dw_dma_regs { | |||
172 | 172 | ||
173 | enum dw_dmac_flags { | 173 | enum dw_dmac_flags { |
174 | DW_DMA_IS_CYCLIC = 0, | 174 | DW_DMA_IS_CYCLIC = 0, |
175 | DW_DMA_IS_SOFT_LLP = 1, | ||
175 | }; | 176 | }; |
176 | 177 | ||
177 | struct dw_dma_chan { | 178 | struct dw_dma_chan { |
@@ -182,6 +183,10 @@ struct dw_dma_chan { | |||
182 | bool paused; | 183 | bool paused; |
183 | bool initialized; | 184 | bool initialized; |
184 | 185 | ||
186 | /* software emulation of the LLP transfers */ | ||
187 | struct list_head *tx_list; | ||
188 | struct list_head *tx_node_active; | ||
189 | |||
185 | spinlock_t lock; | 190 | spinlock_t lock; |
186 | 191 | ||
187 | /* these other elements are all protected by lock */ | 192 | /* these other elements are all protected by lock */ |
@@ -195,6 +200,7 @@ struct dw_dma_chan { | |||
195 | 200 | ||
196 | /* hardware configuration */ | 201 | /* hardware configuration */ |
197 | unsigned int block_size; | 202 | unsigned int block_size; |
203 | bool nollp; | ||
198 | 204 | ||
199 | /* configuration passed via DMA_SLAVE_CONFIG */ | 205 | /* configuration passed via DMA_SLAVE_CONFIG */ |
200 | struct dma_slave_config dma_sconfig; | 206 | struct dma_slave_config dma_sconfig; |