Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/coh901318.c        2
-rw-r--r--  drivers/dma/dmaengine.c        1
-rw-r--r--  drivers/dma/dmatest.c          2
-rw-r--r--  drivers/dma/ioat/dma_v2.c      2
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c   25
-rw-r--r--  drivers/dma/shdma.c          411
-rw-r--r--  drivers/dma/shdma.h            7
7 files changed, 309 insertions, 141 deletions
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index b5f2ee0f8e2c..64a937262a40 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -613,8 +613,6 @@ static void dma_tasklet(unsigned long data)
 	cohd_fin->pending_irqs--;
 	cohc->completed = cohd_fin->desc.cookie;
 
-	BUG_ON(cohc->nbr_active_done && cohd_fin == NULL);
-
 	if (cohc->nbr_active_done == 0)
 		return;
 
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 4eadd98cea53..87399cafce37 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -826,6 +826,7 @@ void dma_async_device_unregister(struct dma_device *device)
 		chan->dev->chan = NULL;
 		mutex_unlock(&dma_list_mutex);
 		device_unregister(&chan->dev->device);
+		free_percpu(chan->local);
 	}
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 8b905161fbf4..948d563941c9 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -467,7 +467,7 @@ err_srcs:
 
 	if (iterations > 0)
 		while (!kthread_should_stop()) {
-			DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit);
+			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
 			interruptible_sleep_on(&wait_dmatest_exit);
 		}
 
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5f7a500e18d0..5cc37afe2bc1 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -249,7 +249,7 @@ int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
 	if (is_ioat_active(status) || is_ioat_idle(status))
 		ioat_suspend(chan);
 	while (is_ioat_active(status) || is_ioat_idle(status)) {
-		if (end && time_after(jiffies, end)) {
+		if (tmo && time_after(jiffies, end)) {
 			err = -ETIMEDOUT;
 			break;
 		}
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 9a5bc1a7389e..e80bae1673fa 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -761,12 +761,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
  * @buffer_n:	buffer number to update.
  *		0 or 1 are the only valid values.
  * @phyaddr:	buffer physical address.
- * @return:	Returns 0 on success or negative error code on failure. This
- *		function will fail if the buffer is set to ready.
  */
 /* Called under spin_lock(_irqsave)(&ichan->lock) */
-static int ipu_update_channel_buffer(struct idmac_channel *ichan,
+static void ipu_update_channel_buffer(struct idmac_channel *ichan,
 				      int buffer_n, dma_addr_t phyaddr)
 {
 	enum ipu_channel channel = ichan->dma_chan.chan_id;
 	uint32_t reg;
@@ -806,8 +804,6 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan,
 	}
 
 	spin_unlock_irqrestore(&ipu_data.lock, flags);
-
-	return 0;
 }
 
 /* Called under spin_lock_irqsave(&ichan->lock) */
@@ -816,7 +812,6 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
 {
 	unsigned int chan_id = ichan->dma_chan.chan_id;
 	struct device *dev = &ichan->dma_chan.dev->device;
-	int ret;
 
 	if (async_tx_test_ack(&desc->txd))
 		return -EINTR;
@@ -827,14 +822,7 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
 	 * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
 	 * doing it again shouldn't hurt either.
 	 */
-	ret = ipu_update_channel_buffer(ichan, buf_idx,
-					sg_dma_address(sg));
-
-	if (ret < 0) {
-		dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
-			sg, chan_id, buf_idx);
-		return ret;
-	}
+	ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg));
 
 	ipu_select_buffer(chan_id, buf_idx);
 	dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
@@ -1379,10 +1367,11 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 
 	if (likely(sgnew) &&
 	    ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
-		callback = desc->txd.callback;
-		callback_param = desc->txd.callback_param;
+		callback = descnew->txd.callback;
+		callback_param = descnew->txd.callback_param;
 		spin_unlock(&ichan->lock);
-		callback(callback_param);
+		if (callback)
+			callback(callback_param);
 		spin_lock(&ichan->lock);
 	}
 
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index d10cc899c460..b75ce8b84c46 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -48,23 +48,20 @@ enum sh_dmae_desc_status {
  */
 #define RS_DEFAULT	(RS_DUAL)
 
+/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
+static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
+
 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
 
 #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
 {
-	ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
+	ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
 }
 
 static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
 {
-	return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
-}
-
-static void dmae_init(struct sh_dmae_chan *sh_chan)
-{
-	u32 chcr = RS_DEFAULT; /* default is DUAL mode */
-	sh_dmae_writel(sh_chan, chcr, CHCR);
+	return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
 }
 
 /*
@@ -95,27 +92,30 @@ static int sh_dmae_rst(int id)
 	return 0;
 }
 
-static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
+static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
 {
 	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
-	if (chcr & CHCR_DE) {
-		if (!(chcr & CHCR_TE))
-			return -EBUSY; /* working */
-	}
-	return 0; /* waiting */
+
+	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
+		return true; /* working */
+
+	return false; /* waiting */
 }
 
-static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
+static unsigned int ts_shift[] = TS_SHIFT;
+static inline unsigned int calc_xmit_shift(u32 chcr)
 {
-	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
-	return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
+	int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
+		((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);
+
+	return ts_shift[cnt];
 }
 
 static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
 {
 	sh_dmae_writel(sh_chan, hw->sar, SAR);
 	sh_dmae_writel(sh_chan, hw->dar, DAR);
-	sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR);
+	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
 }
 
 static void dmae_start(struct sh_dmae_chan *sh_chan)
@@ -123,7 +123,7 @@ static void dmae_start(struct sh_dmae_chan *sh_chan)
 	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
 
 	chcr |= CHCR_DE | CHCR_IE;
-	sh_dmae_writel(sh_chan, chcr, CHCR);
+	sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
 }
 
 static void dmae_halt(struct sh_dmae_chan *sh_chan)
@@ -134,55 +134,50 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan)
 	sh_dmae_writel(sh_chan, chcr, CHCR);
 }
 
+static void dmae_init(struct sh_dmae_chan *sh_chan)
+{
+	u32 chcr = RS_DEFAULT; /* default is DUAL mode */
+	sh_chan->xmit_shift = calc_xmit_shift(chcr);
+	sh_dmae_writel(sh_chan, chcr, CHCR);
+}
+
 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
 {
-	int ret = dmae_is_busy(sh_chan);
 	/* When DMA was working, can not set data to CHCR */
-	if (ret)
-		return ret;
+	if (dmae_is_busy(sh_chan))
+		return -EBUSY;
 
+	sh_chan->xmit_shift = calc_xmit_shift(val);
 	sh_dmae_writel(sh_chan, val, CHCR);
+
 	return 0;
 }
 
-#define DMARS1_ADDR	0x04
-#define DMARS2_ADDR	0x08
-#define DMARS_SHIFT	8
-#define DMARS_CHAN_MSK	0x01
+#define DMARS_SHIFT	8
+#define DMARS_CHAN_MSK	0x01
 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 {
 	u32 addr;
 	int shift = 0;
-	int ret = dmae_is_busy(sh_chan);
-	if (ret)
-		return ret;
+
+	if (dmae_is_busy(sh_chan))
+		return -EBUSY;
 
 	if (sh_chan->id & DMARS_CHAN_MSK)
 		shift = DMARS_SHIFT;
 
-	switch (sh_chan->id) {
-	/* DMARS0 */
-	case 0:
-	case 1:
-		addr = SH_DMARS_BASE;
-		break;
-	/* DMARS1 */
-	case 2:
-	case 3:
-		addr = (SH_DMARS_BASE + DMARS1_ADDR);
-		break;
-	/* DMARS2 */
-	case 4:
-	case 5:
-		addr = (SH_DMARS_BASE + DMARS2_ADDR);
-		break;
-	default:
+	if (sh_chan->id < 6)
+		/* DMA0RS0 - DMA0RS2 */
+		addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4;
+#ifdef SH_DMARS_BASE1
+	else if (sh_chan->id < 12)
+		/* DMA1RS0 - DMA1RS2 */
+		addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4;
+#endif
+	else
 		return -EINVAL;
-	}
 
-	ctrl_outw((val << shift) |
-		  (ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)),
-		  addr);
+	ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr);
 
 	return 0;
 }
@@ -250,10 +245,53 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
 	return NULL;
 }
 
+static struct sh_dmae_slave_config *sh_dmae_find_slave(
+	struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
+{
+	struct dma_device *dma_dev = sh_chan->common.device;
+	struct sh_dmae_device *shdev = container_of(dma_dev,
+					struct sh_dmae_device, common);
+	struct sh_dmae_pdata *pdata = &shdev->pdata;
+	int i;
+
+	if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
+		return NULL;
+
+	for (i = 0; i < pdata->config_num; i++)
+		if (pdata->config[i].slave_id == slave_id)
+			return pdata->config + i;
+
+	return NULL;
+}
+
 static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
 	struct sh_desc *desc;
+	struct sh_dmae_slave *param = chan->private;
+
+	/*
+	 * This relies on the guarantee from dmaengine that alloc_chan_resources
+	 * never runs concurrently with itself or free_chan_resources.
+	 */
+	if (param) {
+		struct sh_dmae_slave_config *cfg;
+
+		cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
+		if (!cfg)
+			return -EINVAL;
+
+		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used))
+			return -EBUSY;
+
+		param->config = cfg;
+
+		dmae_set_dmars(sh_chan, cfg->mid_rid);
+		dmae_set_chcr(sh_chan, cfg->chcr);
+	} else {
+		if ((sh_dmae_readl(sh_chan, CHCR) & 0x700) != 0x400)
+			dmae_set_chcr(sh_chan, RS_DEFAULT);
+	}
 
 	spin_lock_bh(&sh_chan->desc_lock);
 	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
@@ -286,10 +324,18 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 	struct sh_desc *desc, *_desc;
 	LIST_HEAD(list);
 
+	dmae_halt(sh_chan);
+
 	/* Prepared and not submitted descriptors can still be on the queue */
 	if (!list_empty(&sh_chan->ld_queue))
 		sh_dmae_chan_ld_cleanup(sh_chan, true);
 
+	if (chan->private) {
+		/* The caller is holding dma_list_mutex */
+		struct sh_dmae_slave *param = chan->private;
+		clear_bit(param->slave_id, sh_dmae_slave_used);
+	}
+
 	spin_lock_bh(&sh_chan->desc_lock);
 
 	list_splice_init(&sh_chan->ld_free, &list);
@@ -301,23 +347,97 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 	kfree(desc);
 }
 
-static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
-	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
-	size_t len, unsigned long flags)
+/**
+ * sh_dmae_add_desc - get, set up and return one transfer descriptor
+ * @sh_chan:	DMA channel
+ * @flags:	DMA transfer flags
+ * @dest:	destination DMA address, incremented when direction equals
+ *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
+ * @src:	source DMA address, incremented when direction equals
+ *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
+ * @len:	DMA transfer length
+ * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
+ * @direction:	needed for slave DMA to decide which address to keep constant,
+ *		equals DMA_BIDIRECTIONAL for MEMCPY
+ * Returns 0 or an error
+ * Locks: called with desc_lock held
+ */
+static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
+	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
+	struct sh_desc **first, enum dma_data_direction direction)
 {
-	struct sh_dmae_chan *sh_chan;
-	struct sh_desc *first = NULL, *prev = NULL, *new;
+	struct sh_desc *new;
 	size_t copy_size;
-	LIST_HEAD(tx_list);
-	int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
 
-	if (!chan)
+	if (!*len)
 		return NULL;
 
-	if (!len)
+	/* Allocate the link descriptor from the free list */
+	new = sh_dmae_get_desc(sh_chan);
+	if (!new) {
+		dev_err(sh_chan->dev, "No free link descriptor available\n");
 		return NULL;
+	}
 
-	sh_chan = to_sh_chan(chan);
+	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);
+
+	new->hw.sar = *src;
+	new->hw.dar = *dest;
+	new->hw.tcr = copy_size;
+
+	if (!*first) {
+		/* First desc */
+		new->async_tx.cookie = -EBUSY;
+		*first = new;
+	} else {
+		/* Other desc - invisible to the user */
+		new->async_tx.cookie = -EINVAL;
+	}
+
+	dev_dbg(sh_chan->dev,
+		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
+		copy_size, *len, *src, *dest, &new->async_tx,
+		new->async_tx.cookie, sh_chan->xmit_shift);
+
+	new->mark = DESC_PREPARED;
+	new->async_tx.flags = flags;
+	new->direction = direction;
+
+	*len -= copy_size;
+	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
+		*src += copy_size;
+	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
+		*dest += copy_size;
+
+	return new;
+}
+
+/*
+ * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
+ *
+ * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
+ * converted to scatter-gather to guarantee consistent locking and a correct
+ * list manipulation. For slave DMA direction carries the usual meaning, and,
+ * logically, the SG list is RAM and the addr variable contains slave address,
+ * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
+ * and the SG list contains only one element and points at the source buffer.
+ */
+static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
+	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
+	enum dma_data_direction direction, unsigned long flags)
+{
+	struct scatterlist *sg;
+	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
+	LIST_HEAD(tx_list);
+	int chunks = 0;
+	int i;
+
+	if (!sg_len)
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i)
+		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
+			(SH_DMA_TCR_MAX + 1);
 
 	/* Have to lock the whole loop to protect against concurrent release */
 	spin_lock_bh(&sh_chan->desc_lock);
@@ -333,49 +453,32 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	 * only during this function, then they are immediately spliced
 	 * back onto the free list in form of a chain
 	 */
-	do {
-		/* Allocate the link descriptor from the free list */
-		new = sh_dmae_get_desc(sh_chan);
-		if (!new) {
-			dev_err(sh_chan->dev,
-				"No free memory for link descriptor\n");
-			list_for_each_entry(new, &tx_list, node)
-				new->mark = DESC_IDLE;
-			list_splice(&tx_list, &sh_chan->ld_free);
-			spin_unlock_bh(&sh_chan->desc_lock);
-			return NULL;
-		}
-
-		copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1);
-
-		new->hw.sar = dma_src;
-		new->hw.dar = dma_dest;
-		new->hw.tcr = copy_size;
-		if (!first) {
-			/* First desc */
-			new->async_tx.cookie = -EBUSY;
-			first = new;
-		} else {
-			/* Other desc - invisible to the user */
-			new->async_tx.cookie = -EINVAL;
-		}
-
-		dev_dbg(sh_chan->dev,
-			"chaining %u of %u with %p, dst %x, cookie %d\n",
-			copy_size, len, &new->async_tx, dma_dest,
-			new->async_tx.cookie);
-
-		new->mark = DESC_PREPARED;
-		new->async_tx.flags = flags;
-		new->chunks = chunks--;
-
-		prev = new;
-		len -= copy_size;
-		dma_src += copy_size;
-		dma_dest += copy_size;
-		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &tx_list);
-	} while (len);
+	for_each_sg(sgl, sg, sg_len, i) {
+		dma_addr_t sg_addr = sg_dma_address(sg);
+		size_t len = sg_dma_len(sg);
+
+		if (!len)
+			goto err_get_desc;
+
+		do {
+			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
+				i, sg, len, (unsigned long long)sg_addr);
+
+			if (direction == DMA_FROM_DEVICE)
+				new = sh_dmae_add_desc(sh_chan, flags,
+						&sg_addr, addr, &len, &first,
+						direction);
+			else
+				new = sh_dmae_add_desc(sh_chan, flags,
+						addr, &sg_addr, &len, &first,
+						direction);
+			if (!new)
+				goto err_get_desc;
+
+			new->chunks = chunks--;
+			list_add_tail(&new->node, &tx_list);
+		} while (len);
+	}
 
 	if (new != first)
 		new->async_tx.cookie = -ENOSPC;
@@ -386,6 +489,77 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	spin_unlock_bh(&sh_chan->desc_lock);
 
 	return &first->async_tx;
+
+err_get_desc:
+	list_for_each_entry(new, &tx_list, node)
+		new->mark = DESC_IDLE;
+	list_splice(&tx_list, &sh_chan->ld_free);
+
+	spin_unlock_bh(&sh_chan->desc_lock);
+
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
+	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
+	size_t len, unsigned long flags)
+{
+	struct sh_dmae_chan *sh_chan;
+	struct scatterlist sg;
+
+	if (!chan || !len)
+		return NULL;
+
+	chan->private = NULL;
+
+	sh_chan = to_sh_chan(chan);
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
+		    offset_in_page(dma_src));
+	sg_dma_address(&sg) = dma_src;
+	sg_dma_len(&sg) = len;
+
+	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
+			       flags);
+}
+
+static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+	enum dma_data_direction direction, unsigned long flags)
+{
+	struct sh_dmae_slave *param;
+	struct sh_dmae_chan *sh_chan;
+
+	if (!chan)
+		return NULL;
+
+	sh_chan = to_sh_chan(chan);
+	param = chan->private;
+
+	/* Someone calling slave DMA on a public channel? */
+	if (!param || !sg_len) {
+		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
+			 __func__, param, sg_len, param ? param->slave_id : -1);
+		return NULL;
+	}
+
+	/*
+	 * if (param != NULL), this is a successfully requested slave channel,
+	 * therefore param->config != NULL too.
+	 */
+	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
+			       direction, flags);
+}
+
+static void sh_dmae_terminate_all(struct dma_chan *chan)
+{
+	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+
+	if (!chan)
+		return;
+
+	sh_dmae_chan_ld_cleanup(sh_chan, true);
 }
 
 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
@@ -419,7 +593,11 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
 		cookie = tx->cookie;
 
 		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
-			BUG_ON(sh_chan->completed_cookie != desc->cookie - 1);
+			if (sh_chan->completed_cookie != desc->cookie - 1)
+				dev_dbg(sh_chan->dev,
+					"Completing cookie %d, expected %d\n",
+					desc->cookie,
+					sh_chan->completed_cookie + 1);
 			sh_chan->completed_cookie = desc->cookie;
 		}
 
@@ -492,7 +670,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 		return;
 	}
 
-	/* Find the first un-transfer desciptor */
+	/* Find the first not transferred desciptor */
 	list_for_each_entry(sd, &sh_chan->ld_queue, node)
 		if (sd->mark == DESC_SUBMITTED) {
 			/* Get the ld start address from ld_queue */
@@ -559,7 +737,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 
 	/* IRQ Multi */
 	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
-		int cnt = 0;
+		int __maybe_unused cnt = 0;
 		switch (irq) {
 #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
 		case DMTE6_IRQ:
@@ -596,11 +774,14 @@ static void dmae_do_tasklet(unsigned long data)
 	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
 	struct sh_desc *desc;
 	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
 
 	spin_lock(&sh_chan->desc_lock);
 	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
-		if ((desc->hw.sar + desc->hw.tcr) == sar_buf &&
-		    desc->mark == DESC_SUBMITTED) {
+		if (desc->mark == DESC_SUBMITTED &&
+		    ((desc->direction == DMA_FROM_DEVICE &&
+		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
+		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
 			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
 				desc->async_tx.cookie, &desc->async_tx,
 				desc->hw.dar);
@@ -673,7 +854,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
 	}
 
 	snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
-			"sh-dmae%d", new_sh_chan->id);
+		 "sh-dmae%d", new_sh_chan->id);
 
 	/* set up channel irq */
 	err = request_irq(irq, &sh_dmae_interrupt, irqflags,
@@ -684,11 +865,6 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
 		goto err_no_irq;
 	}
 
-	/* CHCR register control function */
-	new_sh_chan->set_chcr = dmae_set_chcr;
-	/* DMARS register control function */
-	new_sh_chan->set_dmars = dmae_set_dmars;
-
 	shdev->chan[id] = new_sh_chan;
 	return 0;
 
@@ -759,12 +935,19 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&shdev->common.channels);
 
 	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
+	dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
+
 	shdev->common.device_alloc_chan_resources
 		= sh_dmae_alloc_chan_resources;
 	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
 	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
 	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
 	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
+
+	/* Compulsory for DMA_SLAVE fields */
+	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
+	shdev->common.device_terminate_all = sh_dmae_terminate_all;
+
 	shdev->common.dev = &pdev->dev;
 	/* Default transfer size of 32 bytes requires 32-byte alignment */
 	shdev->common.copy_align = 5;
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 108f1cffb6f5..7e227f3c87c4 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -29,6 +29,7 @@ struct sh_desc {
 	struct sh_dmae_regs hw;
 	struct list_head node;
 	struct dma_async_tx_descriptor async_tx;
+	enum dma_data_direction direction;
 	dma_cookie_t cookie;
 	int chunks;
 	int mark;
@@ -45,13 +46,9 @@ struct sh_dmae_chan {
 	struct device *dev;		/* Channel device */
 	struct tasklet_struct tasklet;	/* Tasklet */
 	int descs_allocated;		/* desc count */
+	int xmit_shift;			/* log_2(bytes_per_xfer) */
 	int id;				/* Raw id of this channel */
 	char dev_id[16];		/* unique name per DMAC of channel */
-
-	/* Set chcr */
-	int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs);
-	/* Set DMA resource */
-	int (*set_dmars)(struct sh_dmae_chan *sh_chan, u16 res);
 };
 
 struct sh_dmae_device {
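
For context, a minimal hypothetical sketch of how a client driver might request one of the slave channels enabled by the shdma.c changes above. It is not part of this diff: it assumes the shdma slave definitions (struct sh_dmae_slave and the SHDMA_SLAVE_* IDs) are visible to the client, and SHDMA_SLAVE_SCIF0_TX is a placeholder ID. The only requirement the patch itself imposes is that chan->private points at a struct sh_dmae_slave with a valid slave_id before sh_dmae_alloc_chan_resources() runs.

#include <linux/dmaengine.h>

/*
 * Hypothetical filter: attach the slave descriptor so that
 * sh_dmae_alloc_chan_resources() can look up its config and program
 * DMARS/CHCR. A real filter would also check that the channel really
 * belongs to the sh-dma controller before accepting it.
 */
static bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	struct sh_dmae_slave *param = arg;

	chan->private = param;
	return true;
}

static struct dma_chan *request_shdma_slave_chan(struct sh_dmae_slave *param)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	param->slave_id = SHDMA_SLAVE_SCIF0_TX;	/* placeholder slave ID */

	/* Returns NULL if no channel matched or the slave ID is already used */
	return dma_request_channel(mask, shdma_chan_filter, param);
}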