Diffstat (limited to 'drivers/dma/pl330.c')
 drivers/dma/pl330.c | 229 ++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 207 insertions(+), 22 deletions(-)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 00eee59e8b33..621134fdba4c 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -17,6 +17,8 @@
 #include <linux/interrupt.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/pl330.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
 
 #define NR_DEFAULT_DESC	16
 
@@ -68,6 +70,14 @@ struct dma_pl330_chan {
  * NULL if the channel is available to be acquired.
  */
 	void *pl330_chid;
+
+	/* For D-to-M and M-to-D channels */
+	int burst_sz; /* the peripheral fifo width */
+	int burst_len; /* the number of burst */
+	dma_addr_t fifo_addr;
+
+	/* for cyclic capability */
+	bool cyclic;
 };
 
 struct dma_pl330_dmac {
@@ -83,6 +93,8 @@ struct dma_pl330_dmac {
 
 	/* Peripheral channels connected to this DMAC */
 	struct dma_pl330_chan *peripherals; /* keep at end */
+
+	struct clk *clk;
 };
 
 struct dma_pl330_desc {
@@ -152,6 +164,31 @@ static inline void free_desc_list(struct list_head *list)
 	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
 }
 
+static inline void handle_cyclic_desc_list(struct list_head *list)
+{
+	struct dma_pl330_desc *desc;
+	struct dma_pl330_chan *pch;
+	unsigned long flags;
+
+	if (list_empty(list))
+		return;
+
+	list_for_each_entry(desc, list, node) {
+		dma_async_tx_callback callback;
+
+		/* Change status to reload it */
+		desc->status = PREP;
+		pch = desc->pchan;
+		callback = desc->txd.callback;
+		if (callback)
+			callback(desc->txd.callback_param);
+	}
+
+	spin_lock_irqsave(&pch->lock, flags);
+	list_splice_tail_init(list, &pch->work_list);
+	spin_unlock_irqrestore(&pch->lock, flags);
+}
+
 static inline void fill_queue(struct dma_pl330_chan *pch)
 {
 	struct dma_pl330_desc *desc;
@@ -205,7 +242,10 @@ static void pl330_tasklet(unsigned long data)
 
 	spin_unlock_irqrestore(&pch->lock, flags);
 
-	free_desc_list(&list);
+	if (pch->cyclic)
+		handle_cyclic_desc_list(&list);
+	else
+		free_desc_list(&list);
 }
 
 static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
@@ -236,6 +276,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
 	spin_lock_irqsave(&pch->lock, flags);
 
 	pch->completed = chan->cookie = 1;
+	pch->cyclic = false;
 
 	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
 	if (!pch->pl330_chid) {
@@ -253,25 +294,52 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
 static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
-	struct dma_pl330_desc *desc;
+	struct dma_pl330_desc *desc, *_dt;
 	unsigned long flags;
+	struct dma_pl330_dmac *pdmac = pch->dmac;
+	struct dma_slave_config *slave_config;
+	LIST_HEAD(list);
 
-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
-
-	spin_lock_irqsave(&pch->lock, flags);
-
-	/* FLUSH the PL330 Channel thread */
-	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		spin_lock_irqsave(&pch->lock, flags);
 
-	/* Mark all desc done */
-	list_for_each_entry(desc, &pch->work_list, node)
-		desc->status = DONE;
+		/* FLUSH the PL330 Channel thread */
+		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
 
-	spin_unlock_irqrestore(&pch->lock, flags);
+		/* Mark all desc done */
+		list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
+			desc->status = DONE;
+			pch->completed = desc->txd.cookie;
+			list_move_tail(&desc->node, &list);
+		}
 
-	pl330_tasklet((unsigned long) pch);
+		list_splice_tail_init(&list, &pdmac->desc_pool);
+		spin_unlock_irqrestore(&pch->lock, flags);
+		break;
+	case DMA_SLAVE_CONFIG:
+		slave_config = (struct dma_slave_config *)arg;
+
+		if (slave_config->direction == DMA_TO_DEVICE) {
+			if (slave_config->dst_addr)
+				pch->fifo_addr = slave_config->dst_addr;
+			if (slave_config->dst_addr_width)
+				pch->burst_sz = __ffs(slave_config->dst_addr_width);
+			if (slave_config->dst_maxburst)
+				pch->burst_len = slave_config->dst_maxburst;
+		} else if (slave_config->direction == DMA_FROM_DEVICE) {
+			if (slave_config->src_addr)
+				pch->fifo_addr = slave_config->src_addr;
+			if (slave_config->src_addr_width)
+				pch->burst_sz = __ffs(slave_config->src_addr_width);
+			if (slave_config->src_maxburst)
+				pch->burst_len = slave_config->src_maxburst;
+		}
+		break;
+	default:
+		dev_err(pch->dmac->pif.dev, "Not supported command.\n");
+		return -ENXIO;
+	}
 
 	return 0;
 }
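For context, the DMA_SLAVE_CONFIG case above is normally reached from a peripheral driver through the generic dmaengine wrapper rather than by calling device_control() directly. The minimal client-side sketch below is illustrative only; the function name, FIFO address argument, and bus-width choice are assumptions, not part of this patch.

#include <linux/dmaengine.h>

/*
 * Hypothetical client: hand the peripheral FIFO parameters to a PL330
 * slave channel before preparing transfers.  The values end up in
 * pch->fifo_addr, pch->burst_sz and pch->burst_len via pl330_control().
 */
static int example_slave_config(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,		/* memory -> peripheral */
		.dst_addr	= fifo_addr,			/* peripheral FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	/* __ffs(4) -> burst_sz of 2 */
		.dst_maxburst	= 1,
	};

	/* Wrapper that calls device_control(chan, DMA_SLAVE_CONFIG, &cfg) */
	return dmaengine_slave_config(chan, &cfg);
}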
@@ -288,6 +356,9 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
 	pl330_release_channel(pch->pl330_chid);
 	pch->pl330_chid = NULL;
 
+	if (pch->cyclic)
+		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
+
 	spin_unlock_irqrestore(&pch->lock, flags);
 }
 
@@ -453,7 +524,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
 
 	if (peri) {
 		desc->req.rqtype = peri->rqtype;
-		desc->req.peri = peri->peri_id;
+		desc->req.peri = pch->chan.chan_id;
 	} else {
 		desc->req.rqtype = MEMTOMEM;
 		desc->req.peri = 0;
@@ -524,6 +595,51 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
 	return burst_len;
 }
 
+static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
+		size_t period_len, enum dma_data_direction direction)
+{
+	struct dma_pl330_desc *desc;
+	struct dma_pl330_chan *pch = to_pchan(chan);
+	dma_addr_t dst;
+	dma_addr_t src;
+
+	desc = pl330_get_desc(pch);
+	if (!desc) {
+		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+			__func__, __LINE__);
+		return NULL;
+	}
+
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		desc->rqcfg.src_inc = 1;
+		desc->rqcfg.dst_inc = 0;
+		src = dma_addr;
+		dst = pch->fifo_addr;
+		break;
+	case DMA_FROM_DEVICE:
+		desc->rqcfg.src_inc = 0;
+		desc->rqcfg.dst_inc = 1;
+		src = pch->fifo_addr;
+		dst = dma_addr;
+		break;
+	default:
+		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
+			__func__, __LINE__);
+		return NULL;
+	}
+
+	desc->rqcfg.brst_size = pch->burst_sz;
+	desc->rqcfg.brst_len = 1;
+
+	pch->cyclic = true;
+
+	fill_px(&desc->px, dst, src, period_len);
+
+	return &desc->txd;
+}
+
 static struct dma_async_tx_descriptor *
 pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 		dma_addr_t src, size_t len, unsigned long flags)
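The pl330_prep_dma_cyclic() callback added above is exposed through pd->device_prep_dma_cyclic later in the patch; note that it programs a single period (fill_px() is given period_len, not len) and relies on handle_cyclic_desc_list() to requeue the descriptor after each completion. The sketch below shows one possible caller of that era and is illustrative only; the function name and the DMA_TO_DEVICE direction are assumptions.

#include <linux/dmaengine.h>

/*
 * Hypothetical client: start a cyclic (ring-buffer) transfer on a slave
 * channel of this controller.  A real user would also set tx->callback to
 * its period-elapsed handler before submitting.
 */
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						   period_len, DMA_TO_DEVICE);
	if (!tx)
		return -ENOMEM;

	dmaengine_submit(tx);		/* assign a cookie, queue the descriptor */
	dma_async_issue_pending(chan);	/* start the transfer */

	return 0;
}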
@@ -579,7 +695,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct dma_pl330_peri *peri = chan->private;
 	struct scatterlist *sg;
 	unsigned long flags;
-	int i, burst_size;
+	int i;
 	dma_addr_t addr;
 
 	if (unlikely(!pch || !sgl || !sg_len || !peri))
@@ -595,8 +711,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		return NULL;
 	}
 
-	addr = peri->fifo_addr;
-	burst_size = peri->burst_sz;
+	addr = pch->fifo_addr;
 
 	first = NULL;
 
@@ -644,7 +759,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 				sg_dma_address(sg), addr, sg_dma_len(sg));
 		}
 
-		desc->rqcfg.brst_size = burst_size;
+		desc->rqcfg.brst_size = pch->burst_sz;
 		desc->rqcfg.brst_len = 1;
 	}
 
@@ -696,6 +811,30 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 		goto probe_err1;
 	}
 
+	pdmac->clk = clk_get(&adev->dev, "dma");
+	if (IS_ERR(pdmac->clk)) {
+		dev_err(&adev->dev, "Cannot get operation clock.\n");
+		ret = -EINVAL;
+		goto probe_err1;
+	}
+
+	amba_set_drvdata(adev, pdmac);
+
+#ifdef CONFIG_PM_RUNTIME
+	/* to use the runtime PM helper functions */
+	pm_runtime_enable(&adev->dev);
+
+	/* enable the power domain */
+	if (pm_runtime_get_sync(&adev->dev)) {
+		dev_err(&adev->dev, "failed to get runtime pm\n");
+		ret = -ENODEV;
+		goto probe_err1;
+	}
+#else
+	/* enable dma clk */
+	clk_enable(pdmac->clk);
+#endif
+
 	irq = adev->irq[0];
 	ret = request_irq(irq, pl330_irq_handler, 0,
 			dev_name(&adev->dev), pi);
@@ -732,6 +871,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 		case MEMTODEV:
 		case DEVTOMEM:
 			dma_cap_set(DMA_SLAVE, pd->cap_mask);
+			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
 			break;
 		default:
 			dev_err(&adev->dev, "DEVTODEV Not Supported\n");
@@ -760,6 +900,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
 	pd->device_free_chan_resources = pl330_free_chan_resources;
 	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
+	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
 	pd->device_tx_status = pl330_tx_status;
 	pd->device_prep_slave_sg = pl330_prep_slave_sg;
 	pd->device_control = pl330_control;
@@ -771,8 +912,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 		goto probe_err4;
 	}
 
-	amba_set_drvdata(adev, pdmac);
-
 	dev_info(&adev->dev,
 		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
 	dev_info(&adev->dev,
@@ -833,6 +972,13 @@ static int __devexit pl330_remove(struct amba_device *adev)
 	res = &adev->res;
 	release_mem_region(res->start, resource_size(res));
 
+#ifdef CONFIG_PM_RUNTIME
+	pm_runtime_put(&adev->dev);
+	pm_runtime_disable(&adev->dev);
+#else
+	clk_disable(pdmac->clk);
+#endif
+
 	kfree(pdmac);
 
 	return 0;
@@ -846,10 +992,49 @@ static struct amba_id pl330_ids[] = {
 	{ 0, 0 },
 };
 
+#ifdef CONFIG_PM_RUNTIME
+static int pl330_runtime_suspend(struct device *dev)
+{
+	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
+
+	if (!pdmac) {
+		dev_err(dev, "failed to get dmac\n");
+		return -ENODEV;
+	}
+
+	clk_disable(pdmac->clk);
+
+	return 0;
+}
+
+static int pl330_runtime_resume(struct device *dev)
+{
+	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
+
+	if (!pdmac) {
+		dev_err(dev, "failed to get dmac\n");
+		return -ENODEV;
+	}
+
+	clk_enable(pdmac->clk);
+
+	return 0;
+}
+#else
+#define pl330_runtime_suspend	NULL
+#define pl330_runtime_resume	NULL
+#endif /* CONFIG_PM_RUNTIME */
+
+static const struct dev_pm_ops pl330_pm_ops = {
+	.runtime_suspend = pl330_runtime_suspend,
+	.runtime_resume = pl330_runtime_resume,
+};
+
 static struct amba_driver pl330_driver = {
 	.drv = {
 		.owner = THIS_MODULE,
 		.name = "dma-pl330",
+		.pm = &pl330_pm_ops,
 	},
 	.id_table = pl330_ids,
 	.probe = pl330_probe,