author    Guennadi Liakhovetski <g.liakhovetski@gmx.de>	2010-02-03 09:46:41 -0500
committer Paul Mundt <lethal@linux-sh.org>	2010-02-07 19:40:26 -0500
commit    cfefe99795251d76d92e8457f4152f532a961ec5
tree      531a4677401afb0e9816441ac1366dfa46f5ca7b
parent    623b4ac4bf9e767991c66e29b47dd4b19458fb42
sh: implement DMA_SLAVE capability in SH dmaengine driver
Tested to work with a SIU ASoC driver on sh7722 (migor).

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
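For context, a minimal sketch (not part of this patch) of how a client such
as the SIU ASoC driver mentioned above would request one of these slave
channels: it passes a struct sh_dmae_slave through chan->private from a
dma_request_channel() filter, which sh_dmae_alloc_chan_resources() below
then validates. The filter function name and the SHDMA_SLAVE_SIUA_TX enum
value are illustrative, platform-specific assumptions.

	#include <linux/dmaengine.h>

	static bool shdma_chan_filter(struct dma_chan *chan, void *arg)
	{
		/* Hand the slave descriptor to the shdma driver */
		chan->private = arg;
		return true;
	}

	static struct dma_chan *request_slave_chan(struct sh_dmae_slave *param)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* Platform-specific ID from enum sh_dmae_slave_chan_id */
		param->slave_id = SHDMA_SLAVE_SIUA_TX;

		return dma_request_channel(mask, shdma_chan_filter, param);
	}

A transfer is then prepared through the new hook, e.g.
chan->device->device_prep_slave_sg(chan, sgl, sg_len, DMA_TO_DEVICE, 0),
submitted with the descriptor's tx_submit() method, and started via
chan->device->device_issue_pending(chan).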
Diffstat (limited to 'drivers/dma/shdma.c')
-rw-r--r--	drivers/dma/shdma.c	190
1 file changed, 142 insertions(+), 48 deletions(-)
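The lookup in sh_dmae_find_slave() below expects matching platform data. A
sketch of what such a table might look like, with the field names taken
from the patch itself (slave_id, mid_rid, chcr, addr, config, config_num)
but placeholder values, since the real sh7722 settings live in the
platform setup code:

	static struct sh_dmae_slave_config dmae_slaves[] = {
		{
			.slave_id = SHDMA_SLAVE_SIUA_TX, /* enum sh_dmae_slave_chan_id */
			.addr	  = 0xa454c098,	/* placeholder peripheral FIFO address */
			.chcr	  = 0x00000400,	/* placeholder CHCR mode/size bits */
			.mid_rid  = 0xb1,	/* placeholder DMARS MID/RID value */
		},
	};

	static struct sh_dmae_pdata dmae_platform_data = {
		.config	    = dmae_slaves,
		.config_num = ARRAY_SIZE(dmae_slaves),
	};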
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 3e1037c5ebd1..b75ce8b84c46 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -48,6 +48,9 @@ enum sh_dmae_desc_status {
  */
 #define RS_DEFAULT	(RS_DUAL)
 
+/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
+static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
+
 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
 
 #define SH_DMAC_CHAN_BASE(id)	(dma_base_addr[id])
@@ -61,12 +64,6 @@ static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
 	return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
 }
 
-static void dmae_init(struct sh_dmae_chan *sh_chan)
-{
-	u32 chcr = RS_DEFAULT; /* default is DUAL mode */
-	sh_dmae_writel(sh_chan, chcr, CHCR);
-}
-
 /*
  * Reset DMA controller
  *
@@ -106,9 +103,8 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
 }
 
 static unsigned int ts_shift[] = TS_SHIFT;
-static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
+static inline unsigned int calc_xmit_shift(u32 chcr)
 {
-	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
 	int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
 		((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);
 
@@ -119,7 +115,7 @@ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
 {
 	sh_dmae_writel(sh_chan, hw->sar, SAR);
 	sh_dmae_writel(sh_chan, hw->dar, DAR);
-	sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR);
+	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
 }
 
 static void dmae_start(struct sh_dmae_chan *sh_chan)
@@ -127,7 +123,7 @@ static void dmae_start(struct sh_dmae_chan *sh_chan)
 	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
 
 	chcr |= CHCR_DE | CHCR_IE;
-	sh_dmae_writel(sh_chan, chcr, CHCR);
+	sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
 }
 
 static void dmae_halt(struct sh_dmae_chan *sh_chan)
@@ -138,20 +134,27 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan)
 	sh_dmae_writel(sh_chan, chcr, CHCR);
 }
 
+static void dmae_init(struct sh_dmae_chan *sh_chan)
+{
+	u32 chcr = RS_DEFAULT; /* default is DUAL mode */
+	sh_chan->xmit_shift = calc_xmit_shift(chcr);
+	sh_dmae_writel(sh_chan, chcr, CHCR);
+}
+
 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
 {
 	/* When DMA was working, can not set data to CHCR */
 	if (dmae_is_busy(sh_chan))
 		return -EBUSY;
 
+	sh_chan->xmit_shift = calc_xmit_shift(val);
 	sh_dmae_writel(sh_chan, val, CHCR);
+
 	return 0;
 }
 
-#define DMARS1_ADDR	0x04
-#define DMARS2_ADDR	0x08
-#define DMARS_SHIFT	8
-#define DMARS_CHAN_MSK	0x01
+#define DMARS_SHIFT	8
+#define DMARS_CHAN_MSK	0x01
 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 {
 	u32 addr;
@@ -163,29 +166,18 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 	if (sh_chan->id & DMARS_CHAN_MSK)
 		shift = DMARS_SHIFT;
 
-	switch (sh_chan->id) {
-	/* DMARS0 */
-	case 0:
-	case 1:
-		addr = SH_DMARS_BASE;
-		break;
-	/* DMARS1 */
-	case 2:
-	case 3:
-		addr = (SH_DMARS_BASE + DMARS1_ADDR);
-		break;
-	/* DMARS2 */
-	case 4:
-	case 5:
-		addr = (SH_DMARS_BASE + DMARS2_ADDR);
-		break;
-	default:
+	if (sh_chan->id < 6)
+		/* DMA0RS0 - DMA0RS2 */
+		addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4;
+#ifdef SH_DMARS_BASE1
+	else if (sh_chan->id < 12)
+		/* DMA1RS0 - DMA1RS2 */
+		addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4;
+#endif
+	else
 		return -EINVAL;
-	}
 
-	ctrl_outw((val << shift) |
-		  (ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)),
-		  addr);
+	ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr);
 
 	return 0;
 }
@@ -253,10 +245,53 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
 	return NULL;
 }
 
+static struct sh_dmae_slave_config *sh_dmae_find_slave(
+	struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
+{
+	struct dma_device *dma_dev = sh_chan->common.device;
+	struct sh_dmae_device *shdev = container_of(dma_dev,
+					struct sh_dmae_device, common);
+	struct sh_dmae_pdata *pdata = &shdev->pdata;
+	int i;
+
+	if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
+		return NULL;
+
+	for (i = 0; i < pdata->config_num; i++)
+		if (pdata->config[i].slave_id == slave_id)
+			return pdata->config + i;
+
+	return NULL;
+}
+
 static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
 	struct sh_desc *desc;
+	struct sh_dmae_slave *param = chan->private;
+
+	/*
+	 * This relies on the guarantee from dmaengine that alloc_chan_resources
+	 * never runs concurrently with itself or free_chan_resources.
+	 */
+	if (param) {
+		struct sh_dmae_slave_config *cfg;
+
+		cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
+		if (!cfg)
+			return -EINVAL;
+
+		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used))
+			return -EBUSY;
+
+		param->config = cfg;
+
+		dmae_set_dmars(sh_chan, cfg->mid_rid);
+		dmae_set_chcr(sh_chan, cfg->chcr);
+	} else {
+		if ((sh_dmae_readl(sh_chan, CHCR) & 0x700) != 0x400)
+			dmae_set_chcr(sh_chan, RS_DEFAULT);
+	}
 
 	spin_lock_bh(&sh_chan->desc_lock);
 	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
@@ -289,10 +324,18 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 	struct sh_desc *desc, *_desc;
 	LIST_HEAD(list);
 
+	dmae_halt(sh_chan);
+
 	/* Prepared and not submitted descriptors can still be on the queue */
 	if (!list_empty(&sh_chan->ld_queue))
 		sh_dmae_chan_ld_cleanup(sh_chan, true);
 
+	if (chan->private) {
+		/* The caller is holding dma_list_mutex */
+		struct sh_dmae_slave *param = chan->private;
+		clear_bit(param->slave_id, sh_dmae_slave_used);
+	}
+
 	spin_lock_bh(&sh_chan->desc_lock);
 
 	list_splice_init(&sh_chan->ld_free, &list);
@@ -304,7 +347,7 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 		kfree(desc);
 }
 
-/*
+/**
  * sh_dmae_add_desc - get, set up and return one transfer descriptor
  * @sh_chan: DMA channel
  * @flags: DMA transfer flags
@@ -351,12 +394,14 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
 		new->async_tx.cookie = -EINVAL;
 	}
 
-	dev_dbg(sh_chan->dev, "chaining (%u/%u)@%x -> %x with %p, cookie %d\n",
+	dev_dbg(sh_chan->dev,
+		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
 		copy_size, *len, *src, *dest, &new->async_tx,
-		new->async_tx.cookie);
+		new->async_tx.cookie, sh_chan->xmit_shift);
 
 	new->mark = DESC_PREPARED;
 	new->async_tx.flags = flags;
+	new->direction = direction;
 
 	*len -= copy_size;
 	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
@@ -465,6 +510,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	if (!chan || !len)
 		return NULL;
 
+	chan->private = NULL;
+
 	sh_chan = to_sh_chan(chan);
 
 	sg_init_table(&sg, 1);
@@ -477,6 +524,44 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 			       flags);
 }
 
+static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+	enum dma_data_direction direction, unsigned long flags)
+{
+	struct sh_dmae_slave *param;
+	struct sh_dmae_chan *sh_chan;
+
+	if (!chan)
+		return NULL;
+
+	sh_chan = to_sh_chan(chan);
+	param = chan->private;
+
+	/* Someone calling slave DMA on a public channel? */
+	if (!param || !sg_len) {
+		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
+			 __func__, param, sg_len, param ? param->slave_id : -1);
+		return NULL;
+	}
+
+	/*
+	 * if (param != NULL), this is a successfully requested slave channel,
+	 * therefore param->config != NULL too.
+	 */
+	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
+			       direction, flags);
+}
+
+static void sh_dmae_terminate_all(struct dma_chan *chan)
+{
+	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+
+	if (!chan)
+		return;
+
+	sh_dmae_chan_ld_cleanup(sh_chan, true);
+}
+
 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
 {
 	struct sh_desc *desc, *_desc;
@@ -508,7 +593,11 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
 		cookie = tx->cookie;
 
 		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
-			BUG_ON(sh_chan->completed_cookie != desc->cookie - 1);
+			if (sh_chan->completed_cookie != desc->cookie - 1)
+				dev_dbg(sh_chan->dev,
+					"Completing cookie %d, expected %d\n",
+					desc->cookie,
+					sh_chan->completed_cookie + 1);
 			sh_chan->completed_cookie = desc->cookie;
 		}
 
@@ -581,7 +670,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 		return;
 	}
 
-	/* Find the first un-transfer desciptor */
+	/* Find the first not transferred desciptor */
 	list_for_each_entry(sd, &sh_chan->ld_queue, node)
 		if (sd->mark == DESC_SUBMITTED) {
 			/* Get the ld start address from ld_queue */
@@ -685,11 +774,14 @@ static void dmae_do_tasklet(unsigned long data)
 	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
 	struct sh_desc *desc;
 	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
 
 	spin_lock(&sh_chan->desc_lock);
 	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
-		if ((desc->hw.sar + desc->hw.tcr) == sar_buf &&
-		    desc->mark == DESC_SUBMITTED) {
+		if (desc->mark == DESC_SUBMITTED &&
+		    ((desc->direction == DMA_FROM_DEVICE &&
+		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
+		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
 			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
 				desc->async_tx.cookie, &desc->async_tx,
 				desc->hw.dar);
@@ -762,7 +854,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
 	}
 
 	snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
-		"sh-dmae%d", new_sh_chan->id);
+		 "sh-dmae%d", new_sh_chan->id);
 
 	/* set up channel irq */
 	err = request_irq(irq, &sh_dmae_interrupt, irqflags,
@@ -773,11 +865,6 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
 		goto err_no_irq;
 	}
 
-	/* CHCR register control function */
-	new_sh_chan->set_chcr = dmae_set_chcr;
-	/* DMARS register control function */
-	new_sh_chan->set_dmars = dmae_set_dmars;
-
 	shdev->chan[id] = new_sh_chan;
 	return 0;
 
@@ -848,12 +935,19 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&shdev->common.channels);
 
 	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
+	dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
+
 	shdev->common.device_alloc_chan_resources
 		= sh_dmae_alloc_chan_resources;
 	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
 	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
 	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
 	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
+
+	/* Compulsory for DMA_SLAVE fields */
+	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
+	shdev->common.device_terminate_all = sh_dmae_terminate_all;
+
 	shdev->common.dev = &pdev->dev;
 	/* Default transfer size of 32 bytes requires 32-byte alignment */
 	shdev->common.copy_align = 5;
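
A worked check of the dmae_set_dmars() hunk above: besides dropping the
switch, the rewritten read-modify-write appears to also correct the
preserved-byte mask. Each 16-bit DMARS register holds two channels, and
val << shift targets one byte, so the mask must keep the other byte:

	shift = 0 (even channel): new mask 0xFF00 >> 0 = 0xFF00 keeps the high
	                          byte; the old mask 0x00FF kept the very byte
	                          being overwritten and cleared its neighbour.
	shift = 8 (odd channel):  new mask 0xFF00 >> 8 = 0x00FF keeps the low
	                          byte; the old mask 0xFF00 again kept the byte
	                          being overwritten.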