author    Linus Torvalds <torvalds@linux-foundation.org>  2009-04-03 15:13:45 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-04-03 15:13:45 -0400
commit    133e2a3164771454aa326859c2b293687189b553 (patch)
tree      4e21f63be087738d7ffe7526d41e15140fc63ff0
parent    20bec8ab1458c24bed0d5492ee15d87807fc415a (diff)
parent    8c6db1bbf80123839ec87bdd6cb364aea384623d (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
  dma: Add SoF and EoF debugging to ipu_idmac.c, minor cleanup
  dw_dmac: add cyclic API to DW DMA driver
  dmaengine: Add privatecnt to revert DMA_PRIVATE property
  dmatest: add dma interrupts and callbacks
  dmatest: add xor test
  dmaengine: allow dma support for async_tx to be toggled
  async_tx: provide __async_inline for HAS_DMA=n archs
  dmaengine: kill some unused headers
  dmaengine: initialize tx_list in dma_async_tx_descriptor_init
  dma: i.MX31 IPU DMA robustness improvements
  dma: improve section assignment in i.MX31 IPU DMA driver
  dma: ipu_idmac driver cosmetic clean-up
  dmaengine: fail device registration if channel registration fails
-rw-r--r--   crypto/async_tx/async_tx.c      6
-rw-r--r--   crypto/async_tx/async_xor.c     7
-rw-r--r--   drivers/dma/Kconfig            11
-rw-r--r--   drivers/dma/dmaengine.c        60
-rw-r--r--   drivers/dma/dmatest.c         307
-rw-r--r--   drivers/dma/dw_dmac.c         333
-rw-r--r--   drivers/dma/dw_dmac_regs.h      7
-rw-r--r--   drivers/dma/fsldma.c            1
-rw-r--r--   drivers/dma/ioat_dma.c          1
-rw-r--r--   drivers/dma/iop-adma.c          1
-rw-r--r--   drivers/dma/ipu/ipu_idmac.c   371
-rw-r--r--   drivers/dma/ipu/ipu_irq.c       2
-rw-r--r--   drivers/dma/mv_xor.c            1
-rw-r--r--   include/linux/async_tx.h        9
-rw-r--r--   include/linux/dmaengine.h      30
-rw-r--r--   include/linux/dw_dmac.h        19
16 files changed, 925 insertions(+), 241 deletions(-)
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index f21147f3626a..06eb6cc09fef 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -30,7 +30,7 @@
 #ifdef CONFIG_DMA_ENGINE
 static int __init async_tx_init(void)
 {
-	dmaengine_get();
+	async_dmaengine_get();
 
 	printk(KERN_INFO "async_tx: api initialized (async)\n");
 
@@ -39,7 +39,7 @@ static int __init async_tx_init(void)
 
 static void __exit async_tx_exit(void)
 {
-	dmaengine_put();
+	async_dmaengine_put();
 }
 
 /**
@@ -56,7 +56,7 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 	if (depend_tx &&
 	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
 		return depend_tx->chan;
-	return dma_find_channel(tx_type);
+	return async_dma_find_channel(tx_type);
 }
 EXPORT_SYMBOL_GPL(__async_tx_find_channel);
 #else
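The async_dmaengine_get()/async_dmaengine_put()/async_dma_find_channel() calls introduced above come from the "dmaengine: allow dma support for async_tx to be toggled" change to include/linux/dmaengine.h, whose hunk is not shown on this page. A minimal sketch of the assumed shape of those wrappers, keyed off the new ASYNC_TX_DMA option: when the option is set they alias the ordinary dmaengine calls, otherwise they degrade to no-ops so async_tx falls back to synchronous operation.

/* sketch only (assumption) -- the authoritative definitions are in
 * include/linux/dmaengine.h as changed by this merge */
#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()		dmaengine_get()
#define async_dmaengine_put()		dmaengine_put()
#define async_dma_find_channel(type)	dma_find_channel(type)
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;	/* no channel: async_tx runs the operation synchronously */
}
#endif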
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 595b78672b36..95fe2c8d6c51 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -30,11 +30,8 @@
 #include <linux/raid/xor.h>
 #include <linux/async_tx.h>
 
-/* do_async_xor - dma map the pages and perform the xor with an engine.
- * This routine is marked __always_inline so it can be compiled away
- * when CONFIG_DMA_ENGINE=n
- */
-static __always_inline struct dma_async_tx_descriptor *
+/* do_async_xor - dma map the pages and perform the xor with an engine */
+static __async_inline struct dma_async_tx_descriptor *
 do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	     unsigned int offset, int src_cnt, size_t len,
 	     enum async_tx_flags flags,
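__async_inline replaces the open-coded __always_inline so that do_async_xor() is still forced inline (and compiled away) on configurations without DMA engine support, while remaining an ordinary function otherwise. A plausible definition, assuming the macro lives in include/linux/async_tx.h and keys off CONFIG_DMA_ENGINE:

/* assumed definition -- see the include/linux/async_tx.h change in this merge */
#ifdef CONFIG_DMA_ENGINE
#define __async_inline
#else
#define __async_inline __always_inline
#endif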
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 48ea59e79672..3b3c01b6f1ee 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -98,6 +98,17 @@ config NET_DMA
 	  Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
 	  say N.
 
+config ASYNC_TX_DMA
+	bool "Async_tx: Offload support for the async_tx api"
+	depends on DMA_ENGINE
+	help
+	  This allows the async_tx api to take advantage of offload engines for
+	  memcpy, memset, xor, and raid6 p+q operations. If your platform has
+	  a dma engine that can perform raid operations and you have enabled
+	  MD_RAID456 say Y.
+
+	  If unsure, say N.
+
 config DMATEST
 	tristate "DMA Test client"
 	depends on DMA_ENGINE
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 280a9d263eb3..92438e9dacc3 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -507,6 +507,7 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 	 * published in the general-purpose allocator
 	 */
 	dma_cap_set(DMA_PRIVATE, device->cap_mask);
+	device->privatecnt++;
 	err = dma_chan_get(chan);
 
 	if (err == -ENODEV) {
@@ -518,6 +519,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 			       dma_chan_name(chan), err);
 		else
 			break;
+		if (--device->privatecnt == 0)
+			dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 		chan->private = NULL;
 		chan = NULL;
 	}
@@ -537,6 +540,9 @@ void dma_release_channel(struct dma_chan *chan)
 	WARN_ONCE(chan->client_count != 1,
 		  "chan reference count %d != 1\n", chan->client_count);
 	dma_chan_put(chan);
+	/* drop PRIVATE cap enabled by __dma_request_channel() */
+	if (--chan->device->privatecnt == 0)
+		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
 	chan->private = NULL;
 	mutex_unlock(&dma_list_mutex);
 }
@@ -602,6 +608,24 @@ void dmaengine_put(void)
 }
 EXPORT_SYMBOL(dmaengine_put);
 
+static int get_dma_id(struct dma_device *device)
+{
+	int rc;
+
+ idr_retry:
+	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
+		return -ENOMEM;
+	mutex_lock(&dma_list_mutex);
+	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
+	mutex_unlock(&dma_list_mutex);
+	if (rc == -EAGAIN)
+		goto idr_retry;
+	else if (rc != 0)
+		return rc;
+
+	return 0;
+}
+
 /**
  * dma_async_device_register - registers DMA devices found
  * @device: &dma_device
@@ -640,27 +664,25 @@ int dma_async_device_register(struct dma_device *device)
 	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
 	if (!idr_ref)
 		return -ENOMEM;
-	atomic_set(idr_ref, 0);
- idr_retry:
-	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
-		return -ENOMEM;
-	mutex_lock(&dma_list_mutex);
-	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
-	mutex_unlock(&dma_list_mutex);
-	if (rc == -EAGAIN)
-		goto idr_retry;
-	else if (rc != 0)
+	rc = get_dma_id(device);
+	if (rc != 0) {
+		kfree(idr_ref);
 		return rc;
+	}
+
+	atomic_set(idr_ref, 0);
 
 	/* represent channels in sysfs. Probably want devs too */
 	list_for_each_entry(chan, &device->channels, device_node) {
+		rc = -ENOMEM;
 		chan->local = alloc_percpu(typeof(*chan->local));
 		if (chan->local == NULL)
-			continue;
+			goto err_out;
 		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
 		if (chan->dev == NULL) {
 			free_percpu(chan->local);
-			continue;
+			chan->local = NULL;
+			goto err_out;
 		}
 
 		chan->chan_id = chancnt++;
@@ -677,6 +699,8 @@ int dma_async_device_register(struct dma_device *device)
 		if (rc) {
 			free_percpu(chan->local);
 			chan->local = NULL;
+			kfree(chan->dev);
+			atomic_dec(idr_ref);
 			goto err_out;
 		}
 		chan->client_count = 0;
@@ -701,12 +725,23 @@ int dma_async_device_register(struct dma_device *device)
 		}
 	}
 	list_add_tail_rcu(&device->global_node, &dma_device_list);
+	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+		device->privatecnt++;	/* Always private */
 	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
 
 	return 0;
 
 err_out:
+	/* if we never registered a channel just release the idr */
+	if (atomic_read(idr_ref) == 0) {
+		mutex_lock(&dma_list_mutex);
+		idr_remove(&dma_idr, device->dev_id);
+		mutex_unlock(&dma_list_mutex);
+		kfree(idr_ref);
+		return rc;
+	}
+
 	list_for_each_entry(chan, &device->channels, device_node) {
 		if (chan->local == NULL)
 			continue;
@@ -893,6 +928,7 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 {
 	tx->chan = chan;
 	spin_lock_init(&tx->lock);
+	INIT_LIST_HEAD(&tx->tx_list);
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
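The new privatecnt field reference-counts the DMA_PRIVATE capability that __dma_request_channel() sets, so the capability is reverted once the last privately requested channel on a device is released. Nothing changes for clients; a minimal usage sketch with a hypothetical filter function:

static bool my_filter(struct dma_chan *chan, void *param)
{
	return true;	/* hypothetical: accept any offered channel */
}

static int my_client_get_channel(void)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* sets DMA_PRIVATE on the device and increments privatecnt */
	chan = dma_request_channel(mask, my_filter, NULL);
	if (!chan)
		return -ENODEV;

	/* ... issue transfers on chan ... */

	/* decrements privatecnt; DMA_PRIVATE is cleared when it reaches zero */
	dma_release_channel(chan);
	return 0;
}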
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index e190d8b30700..a27c0fb1bc11 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -38,6 +38,11 @@ module_param(max_channels, uint, S_IRUGO);
38MODULE_PARM_DESC(max_channels, 38MODULE_PARM_DESC(max_channels,
39 "Maximum number of channels to use (default: all)"); 39 "Maximum number of channels to use (default: all)");
40 40
41static unsigned int xor_sources = 3;
42module_param(xor_sources, uint, S_IRUGO);
43MODULE_PARM_DESC(xor_sources,
44 "Number of xor source buffers (default: 3)");
45
41/* 46/*
42 * Initialization patterns. All bytes in the source buffer has bit 7 47 * Initialization patterns. All bytes in the source buffer has bit 7
43 * set, all bytes in the destination buffer has bit 7 cleared. 48 * set, all bytes in the destination buffer has bit 7 cleared.
@@ -59,8 +64,9 @@ struct dmatest_thread {
59 struct list_head node; 64 struct list_head node;
60 struct task_struct *task; 65 struct task_struct *task;
61 struct dma_chan *chan; 66 struct dma_chan *chan;
62 u8 *srcbuf; 67 u8 **srcs;
63 u8 *dstbuf; 68 u8 **dsts;
69 enum dma_transaction_type type;
64}; 70};
65 71
66struct dmatest_chan { 72struct dmatest_chan {
@@ -98,30 +104,37 @@ static unsigned long dmatest_random(void)
98 return buf; 104 return buf;
99} 105}
100 106
101static void dmatest_init_srcbuf(u8 *buf, unsigned int start, unsigned int len) 107static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
102{ 108{
103 unsigned int i; 109 unsigned int i;
104 110 u8 *buf;
105 for (i = 0; i < start; i++) 111
106 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); 112 for (; (buf = *bufs); bufs++) {
107 for ( ; i < start + len; i++) 113 for (i = 0; i < start; i++)
108 buf[i] = PATTERN_SRC | PATTERN_COPY 114 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
109 | (~i & PATTERN_COUNT_MASK);; 115 for ( ; i < start + len; i++)
110 for ( ; i < test_buf_size; i++) 116 buf[i] = PATTERN_SRC | PATTERN_COPY
111 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); 117 | (~i & PATTERN_COUNT_MASK);;
118 for ( ; i < test_buf_size; i++)
119 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
120 buf++;
121 }
112} 122}
113 123
114static void dmatest_init_dstbuf(u8 *buf, unsigned int start, unsigned int len) 124static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
115{ 125{
116 unsigned int i; 126 unsigned int i;
117 127 u8 *buf;
118 for (i = 0; i < start; i++) 128
119 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); 129 for (; (buf = *bufs); bufs++) {
120 for ( ; i < start + len; i++) 130 for (i = 0; i < start; i++)
121 buf[i] = PATTERN_DST | PATTERN_OVERWRITE 131 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
122 | (~i & PATTERN_COUNT_MASK); 132 for ( ; i < start + len; i++)
123 for ( ; i < test_buf_size; i++) 133 buf[i] = PATTERN_DST | PATTERN_OVERWRITE
124 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); 134 | (~i & PATTERN_COUNT_MASK);
135 for ( ; i < test_buf_size; i++)
136 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
137 }
125} 138}
126 139
127static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, 140static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
@@ -150,23 +163,30 @@ static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
150 thread_name, index, expected, actual); 163 thread_name, index, expected, actual);
151} 164}
152 165
153static unsigned int dmatest_verify(u8 *buf, unsigned int start, 166static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
154 unsigned int end, unsigned int counter, u8 pattern, 167 unsigned int end, unsigned int counter, u8 pattern,
155 bool is_srcbuf) 168 bool is_srcbuf)
156{ 169{
157 unsigned int i; 170 unsigned int i;
158 unsigned int error_count = 0; 171 unsigned int error_count = 0;
159 u8 actual; 172 u8 actual;
160 173 u8 expected;
161 for (i = start; i < end; i++) { 174 u8 *buf;
162 actual = buf[i]; 175 unsigned int counter_orig = counter;
163 if (actual != (pattern | (~counter & PATTERN_COUNT_MASK))) { 176
164 if (error_count < 32) 177 for (; (buf = *bufs); bufs++) {
165 dmatest_mismatch(actual, pattern, i, counter, 178 counter = counter_orig;
166 is_srcbuf); 179 for (i = start; i < end; i++) {
167 error_count++; 180 actual = buf[i];
181 expected = pattern | (~counter & PATTERN_COUNT_MASK);
182 if (actual != expected) {
183 if (error_count < 32)
184 dmatest_mismatch(actual, pattern, i,
185 counter, is_srcbuf);
186 error_count++;
187 }
188 counter++;
168 } 189 }
169 counter++;
170 } 190 }
171 191
172 if (error_count > 32) 192 if (error_count > 32)
@@ -176,12 +196,17 @@ static unsigned int dmatest_verify(u8 *buf, unsigned int start,
176 return error_count; 196 return error_count;
177} 197}
178 198
199static void dmatest_callback(void *completion)
200{
201 complete(completion);
202}
203
179/* 204/*
180 * This function repeatedly tests DMA transfers of various lengths and 205 * This function repeatedly tests DMA transfers of various lengths and
181 * offsets until it is told to exit by kthread_stop(). There may be 206 * offsets for a given operation type until it is told to exit by
182 * multiple threads running this function in parallel for a single 207 * kthread_stop(). There may be multiple threads running this function
183 * channel, and there may be multiple channels being tested in 208 * in parallel for a single channel, and there may be multiple channels
184 * parallel. 209 * being tested in parallel.
185 * 210 *
186 * Before each test, the source and destination buffer is initialized 211 * Before each test, the source and destination buffer is initialized
187 * with a known pattern. This pattern is different depending on 212 * with a known pattern. This pattern is different depending on
@@ -201,25 +226,57 @@ static int dmatest_func(void *data)
201 unsigned int total_tests = 0; 226 unsigned int total_tests = 0;
202 dma_cookie_t cookie; 227 dma_cookie_t cookie;
203 enum dma_status status; 228 enum dma_status status;
229 enum dma_ctrl_flags flags;
204 int ret; 230 int ret;
231 int src_cnt;
232 int dst_cnt;
233 int i;
205 234
206 thread_name = current->comm; 235 thread_name = current->comm;
207 236
208 ret = -ENOMEM; 237 ret = -ENOMEM;
209 thread->srcbuf = kmalloc(test_buf_size, GFP_KERNEL);
210 if (!thread->srcbuf)
211 goto err_srcbuf;
212 thread->dstbuf = kmalloc(test_buf_size, GFP_KERNEL);
213 if (!thread->dstbuf)
214 goto err_dstbuf;
215 238
216 smp_rmb(); 239 smp_rmb();
217 chan = thread->chan; 240 chan = thread->chan;
241 if (thread->type == DMA_MEMCPY)
242 src_cnt = dst_cnt = 1;
243 else if (thread->type == DMA_XOR) {
244 src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
245 dst_cnt = 1;
246 } else
247 goto err_srcs;
248
249 thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
250 if (!thread->srcs)
251 goto err_srcs;
252 for (i = 0; i < src_cnt; i++) {
253 thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
254 if (!thread->srcs[i])
255 goto err_srcbuf;
256 }
257 thread->srcs[i] = NULL;
258
259 thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
260 if (!thread->dsts)
261 goto err_dsts;
262 for (i = 0; i < dst_cnt; i++) {
263 thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
264 if (!thread->dsts[i])
265 goto err_dstbuf;
266 }
267 thread->dsts[i] = NULL;
268
269 set_user_nice(current, 10);
270
271 flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT;
218 272
219 while (!kthread_should_stop()) { 273 while (!kthread_should_stop()) {
220 struct dma_device *dev = chan->device; 274 struct dma_device *dev = chan->device;
221 struct dma_async_tx_descriptor *tx; 275 struct dma_async_tx_descriptor *tx = NULL;
222 dma_addr_t dma_src, dma_dest; 276 dma_addr_t dma_srcs[src_cnt];
277 dma_addr_t dma_dsts[dst_cnt];
278 struct completion cmp;
279 unsigned long tmo = msecs_to_jiffies(3000);
223 280
224 total_tests++; 281 total_tests++;
225 282
@@ -227,22 +284,41 @@ static int dmatest_func(void *data)
227 src_off = dmatest_random() % (test_buf_size - len + 1); 284 src_off = dmatest_random() % (test_buf_size - len + 1);
228 dst_off = dmatest_random() % (test_buf_size - len + 1); 285 dst_off = dmatest_random() % (test_buf_size - len + 1);
229 286
230 dmatest_init_srcbuf(thread->srcbuf, src_off, len); 287 dmatest_init_srcs(thread->srcs, src_off, len);
231 dmatest_init_dstbuf(thread->dstbuf, dst_off, len); 288 dmatest_init_dsts(thread->dsts, dst_off, len);
232 289
233 dma_src = dma_map_single(dev->dev, thread->srcbuf + src_off, 290 for (i = 0; i < src_cnt; i++) {
234 len, DMA_TO_DEVICE); 291 u8 *buf = thread->srcs[i] + src_off;
292
293 dma_srcs[i] = dma_map_single(dev->dev, buf, len,
294 DMA_TO_DEVICE);
295 }
235 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ 296 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
236 dma_dest = dma_map_single(dev->dev, thread->dstbuf, 297 for (i = 0; i < dst_cnt; i++) {
237 test_buf_size, DMA_BIDIRECTIONAL); 298 dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
299 test_buf_size,
300 DMA_BIDIRECTIONAL);
301 }
302
303 if (thread->type == DMA_MEMCPY)
304 tx = dev->device_prep_dma_memcpy(chan,
305 dma_dsts[0] + dst_off,
306 dma_srcs[0], len,
307 flags);
308 else if (thread->type == DMA_XOR)
309 tx = dev->device_prep_dma_xor(chan,
310 dma_dsts[0] + dst_off,
311 dma_srcs, xor_sources,
312 len, flags);
238 313
239 tx = dev->device_prep_dma_memcpy(chan, dma_dest + dst_off,
240 dma_src, len,
241 DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP);
242 if (!tx) { 314 if (!tx) {
243 dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); 315 for (i = 0; i < src_cnt; i++)
244 dma_unmap_single(dev->dev, dma_dest, 316 dma_unmap_single(dev->dev, dma_srcs[i], len,
245 test_buf_size, DMA_BIDIRECTIONAL); 317 DMA_TO_DEVICE);
318 for (i = 0; i < dst_cnt; i++)
319 dma_unmap_single(dev->dev, dma_dsts[i],
320 test_buf_size,
321 DMA_BIDIRECTIONAL);
246 pr_warning("%s: #%u: prep error with src_off=0x%x " 322 pr_warning("%s: #%u: prep error with src_off=0x%x "
247 "dst_off=0x%x len=0x%x\n", 323 "dst_off=0x%x len=0x%x\n",
248 thread_name, total_tests - 1, 324 thread_name, total_tests - 1,
@@ -251,7 +327,10 @@ static int dmatest_func(void *data)
251 failed_tests++; 327 failed_tests++;
252 continue; 328 continue;
253 } 329 }
254 tx->callback = NULL; 330
331 init_completion(&cmp);
332 tx->callback = dmatest_callback;
333 tx->callback_param = &cmp;
255 cookie = tx->tx_submit(tx); 334 cookie = tx->tx_submit(tx);
256 335
257 if (dma_submit_error(cookie)) { 336 if (dma_submit_error(cookie)) {
@@ -263,44 +342,50 @@ static int dmatest_func(void *data)
263 failed_tests++; 342 failed_tests++;
264 continue; 343 continue;
265 } 344 }
266 dma_async_memcpy_issue_pending(chan); 345 dma_async_issue_pending(chan);
267 346
268 do { 347 tmo = wait_for_completion_timeout(&cmp, tmo);
269 msleep(1); 348 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
270 status = dma_async_memcpy_complete(
271 chan, cookie, NULL, NULL);
272 } while (status == DMA_IN_PROGRESS);
273 349
274 if (status == DMA_ERROR) { 350 if (tmo == 0) {
275 pr_warning("%s: #%u: error during copy\n", 351 pr_warning("%s: #%u: test timed out\n",
276 thread_name, total_tests - 1); 352 thread_name, total_tests - 1);
353 failed_tests++;
354 continue;
355 } else if (status != DMA_SUCCESS) {
356 pr_warning("%s: #%u: got completion callback,"
357 " but status is \'%s\'\n",
358 thread_name, total_tests - 1,
359 status == DMA_ERROR ? "error" : "in progress");
277 failed_tests++; 360 failed_tests++;
278 continue; 361 continue;
279 } 362 }
363
280 /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ 364 /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
281 dma_unmap_single(dev->dev, dma_dest, 365 for (i = 0; i < dst_cnt; i++)
282 test_buf_size, DMA_BIDIRECTIONAL); 366 dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size,
367 DMA_BIDIRECTIONAL);
283 368
284 error_count = 0; 369 error_count = 0;
285 370
286 pr_debug("%s: verifying source buffer...\n", thread_name); 371 pr_debug("%s: verifying source buffer...\n", thread_name);
287 error_count += dmatest_verify(thread->srcbuf, 0, src_off, 372 error_count += dmatest_verify(thread->srcs, 0, src_off,
288 0, PATTERN_SRC, true); 373 0, PATTERN_SRC, true);
289 error_count += dmatest_verify(thread->srcbuf, src_off, 374 error_count += dmatest_verify(thread->srcs, src_off,
290 src_off + len, src_off, 375 src_off + len, src_off,
291 PATTERN_SRC | PATTERN_COPY, true); 376 PATTERN_SRC | PATTERN_COPY, true);
292 error_count += dmatest_verify(thread->srcbuf, src_off + len, 377 error_count += dmatest_verify(thread->srcs, src_off + len,
293 test_buf_size, src_off + len, 378 test_buf_size, src_off + len,
294 PATTERN_SRC, true); 379 PATTERN_SRC, true);
295 380
296 pr_debug("%s: verifying dest buffer...\n", 381 pr_debug("%s: verifying dest buffer...\n",
297 thread->task->comm); 382 thread->task->comm);
298 error_count += dmatest_verify(thread->dstbuf, 0, dst_off, 383 error_count += dmatest_verify(thread->dsts, 0, dst_off,
299 0, PATTERN_DST, false); 384 0, PATTERN_DST, false);
300 error_count += dmatest_verify(thread->dstbuf, dst_off, 385 error_count += dmatest_verify(thread->dsts, dst_off,
301 dst_off + len, src_off, 386 dst_off + len, src_off,
302 PATTERN_SRC | PATTERN_COPY, false); 387 PATTERN_SRC | PATTERN_COPY, false);
303 error_count += dmatest_verify(thread->dstbuf, dst_off + len, 388 error_count += dmatest_verify(thread->dsts, dst_off + len,
304 test_buf_size, dst_off + len, 389 test_buf_size, dst_off + len,
305 PATTERN_DST, false); 390 PATTERN_DST, false);
306 391
@@ -319,10 +404,16 @@ static int dmatest_func(void *data)
319 } 404 }
320 405
321 ret = 0; 406 ret = 0;
322 kfree(thread->dstbuf); 407 for (i = 0; thread->dsts[i]; i++)
408 kfree(thread->dsts[i]);
323err_dstbuf: 409err_dstbuf:
324 kfree(thread->srcbuf); 410 kfree(thread->dsts);
411err_dsts:
412 for (i = 0; thread->srcs[i]; i++)
413 kfree(thread->srcs[i]);
325err_srcbuf: 414err_srcbuf:
415 kfree(thread->srcs);
416err_srcs:
326 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", 417 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
327 thread_name, total_tests, failed_tests, ret); 418 thread_name, total_tests, failed_tests, ret);
328 return ret; 419 return ret;
@@ -344,35 +435,36 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
344 kfree(dtc); 435 kfree(dtc);
345} 436}
346 437
347static int dmatest_add_channel(struct dma_chan *chan) 438static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type)
348{ 439{
349 struct dmatest_chan *dtc; 440 struct dmatest_thread *thread;
350 struct dmatest_thread *thread; 441 struct dma_chan *chan = dtc->chan;
351 unsigned int i; 442 char *op;
352 443 unsigned int i;
353 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
354 if (!dtc) {
355 pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
356 return -ENOMEM;
357 }
358 444
359 dtc->chan = chan; 445 if (type == DMA_MEMCPY)
360 INIT_LIST_HEAD(&dtc->threads); 446 op = "copy";
447 else if (type == DMA_XOR)
448 op = "xor";
449 else
450 return -EINVAL;
361 451
362 for (i = 0; i < threads_per_chan; i++) { 452 for (i = 0; i < threads_per_chan; i++) {
363 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); 453 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
364 if (!thread) { 454 if (!thread) {
365 pr_warning("dmatest: No memory for %s-test%u\n", 455 pr_warning("dmatest: No memory for %s-%s%u\n",
366 dma_chan_name(chan), i); 456 dma_chan_name(chan), op, i);
457
367 break; 458 break;
368 } 459 }
369 thread->chan = dtc->chan; 460 thread->chan = dtc->chan;
461 thread->type = type;
370 smp_wmb(); 462 smp_wmb();
371 thread->task = kthread_run(dmatest_func, thread, "%s-test%u", 463 thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
372 dma_chan_name(chan), i); 464 dma_chan_name(chan), op, i);
373 if (IS_ERR(thread->task)) { 465 if (IS_ERR(thread->task)) {
374 pr_warning("dmatest: Failed to run thread %s-test%u\n", 466 pr_warning("dmatest: Failed to run thread %s-%s%u\n",
375 dma_chan_name(chan), i); 467 dma_chan_name(chan), op, i);
376 kfree(thread); 468 kfree(thread);
377 break; 469 break;
378 } 470 }
@@ -382,7 +474,36 @@ static int dmatest_add_channel(struct dma_chan *chan)
382 list_add_tail(&thread->node, &dtc->threads); 474 list_add_tail(&thread->node, &dtc->threads);
383 } 475 }
384 476
385 pr_info("dmatest: Started %u threads using %s\n", i, dma_chan_name(chan)); 477 return i;
478}
479
480static int dmatest_add_channel(struct dma_chan *chan)
481{
482 struct dmatest_chan *dtc;
483 struct dma_device *dma_dev = chan->device;
484 unsigned int thread_count = 0;
485 unsigned int cnt;
486
487 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
488 if (!dtc) {
489 pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
490 return -ENOMEM;
491 }
492
493 dtc->chan = chan;
494 INIT_LIST_HEAD(&dtc->threads);
495
496 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
497 cnt = dmatest_add_threads(dtc, DMA_MEMCPY);
498 thread_count += cnt > 0 ?: 0;
499 }
500 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
501 cnt = dmatest_add_threads(dtc, DMA_XOR);
502 thread_count += cnt > 0 ?: 0;
503 }
504
505 pr_info("dmatest: Started %u threads using %s\n",
506 thread_count, dma_chan_name(chan));
386 507
387 list_add_tail(&dtc->node, &dmatest_channels); 508 list_add_tail(&dtc->node, &dmatest_channels);
388 nr_channels++; 509 nr_channels++;
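The test now relies on descriptor interrupts rather than polling: DMA_PREP_INTERRUPT is passed at prep time, a completion is attached via the callback, and the thread waits on it with a 3-second timeout before checking the cookie status. Distilled into the generic client-side pattern the test exercises (descriptor and channel setup omitted; a sketch, not a drop-in):

	struct completion cmp;
	unsigned long tmo = msecs_to_jiffies(3000);
	enum dma_status status;
	dma_cookie_t cookie;

	init_completion(&cmp);
	tx->callback = dmatest_callback;	/* simply calls complete(&cmp) */
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	dma_async_issue_pending(chan);

	tmo = wait_for_completion_timeout(&cmp, tmo);
	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
	if (tmo == 0)
		;	/* timed out: the interrupt/callback never fired */
	else if (status != DMA_SUCCESS)
		;	/* callback fired, but the transfer did not succeed */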
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 20ad3d26bec2..98c9a847bf51 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -363,6 +363,82 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
363 dwc_descriptor_complete(dwc, bad_desc); 363 dwc_descriptor_complete(dwc, bad_desc);
364} 364}
365 365
366/* --------------------- Cyclic DMA API extensions -------------------- */
367
368inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
369{
370 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
371 return channel_readl(dwc, SAR);
372}
373EXPORT_SYMBOL(dw_dma_get_src_addr);
374
375inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
376{
377 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
378 return channel_readl(dwc, DAR);
379}
380EXPORT_SYMBOL(dw_dma_get_dst_addr);
381
382/* called with dwc->lock held and all DMAC interrupts disabled */
383static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
384 u32 status_block, u32 status_err, u32 status_xfer)
385{
386 if (status_block & dwc->mask) {
387 void (*callback)(void *param);
388 void *callback_param;
389
390 dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
391 channel_readl(dwc, LLP));
392 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
393
394 callback = dwc->cdesc->period_callback;
395 callback_param = dwc->cdesc->period_callback_param;
396 if (callback) {
397 spin_unlock(&dwc->lock);
398 callback(callback_param);
399 spin_lock(&dwc->lock);
400 }
401 }
402
403 /*
404 * Error and transfer complete are highly unlikely, and will most
405 * likely be due to a configuration error by the user.
406 */
407 if (unlikely(status_err & dwc->mask) ||
408 unlikely(status_xfer & dwc->mask)) {
409 int i;
410
411 dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
412 "interrupt, stopping DMA transfer\n",
413 status_xfer ? "xfer" : "error");
414 dev_err(chan2dev(&dwc->chan),
415 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
416 channel_readl(dwc, SAR),
417 channel_readl(dwc, DAR),
418 channel_readl(dwc, LLP),
419 channel_readl(dwc, CTL_HI),
420 channel_readl(dwc, CTL_LO));
421
422 channel_clear_bit(dw, CH_EN, dwc->mask);
423 while (dma_readl(dw, CH_EN) & dwc->mask)
424 cpu_relax();
425
426 /* make sure DMA does not restart by loading a new list */
427 channel_writel(dwc, LLP, 0);
428 channel_writel(dwc, CTL_LO, 0);
429 channel_writel(dwc, CTL_HI, 0);
430
431 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
432 dma_writel(dw, CLEAR.ERROR, dwc->mask);
433 dma_writel(dw, CLEAR.XFER, dwc->mask);
434
435 for (i = 0; i < dwc->cdesc->periods; i++)
436 dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
437 }
438}
439
440/* ------------------------------------------------------------------------- */
441
366static void dw_dma_tasklet(unsigned long data) 442static void dw_dma_tasklet(unsigned long data)
367{ 443{
368 struct dw_dma *dw = (struct dw_dma *)data; 444 struct dw_dma *dw = (struct dw_dma *)data;
@@ -382,7 +458,10 @@ static void dw_dma_tasklet(unsigned long data)
382 for (i = 0; i < dw->dma.chancnt; i++) { 458 for (i = 0; i < dw->dma.chancnt; i++) {
383 dwc = &dw->chan[i]; 459 dwc = &dw->chan[i];
384 spin_lock(&dwc->lock); 460 spin_lock(&dwc->lock);
385 if (status_err & (1 << i)) 461 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
462 dwc_handle_cyclic(dw, dwc, status_block, status_err,
463 status_xfer);
464 else if (status_err & (1 << i))
386 dwc_handle_error(dw, dwc); 465 dwc_handle_error(dw, dwc);
387 else if ((status_block | status_xfer) & (1 << i)) 466 else if ((status_block | status_xfer) & (1 << i))
388 dwc_scan_descriptors(dw, dwc); 467 dwc_scan_descriptors(dw, dwc);
@@ -826,7 +905,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
826 dma_async_tx_descriptor_init(&desc->txd, chan); 905 dma_async_tx_descriptor_init(&desc->txd, chan);
827 desc->txd.tx_submit = dwc_tx_submit; 906 desc->txd.tx_submit = dwc_tx_submit;
828 desc->txd.flags = DMA_CTRL_ACK; 907 desc->txd.flags = DMA_CTRL_ACK;
829 INIT_LIST_HEAD(&desc->txd.tx_list);
830 desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli, 908 desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
831 sizeof(desc->lli), DMA_TO_DEVICE); 909 sizeof(desc->lli), DMA_TO_DEVICE);
832 dwc_desc_put(dwc, desc); 910 dwc_desc_put(dwc, desc);
@@ -884,6 +962,257 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
884 dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); 962 dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
885} 963}
886 964
965/* --------------------- Cyclic DMA API extensions -------------------- */
966
967/**
968 * dw_dma_cyclic_start - start the cyclic DMA transfer
969 * @chan: the DMA channel to start
970 *
971 * Must be called with soft interrupts disabled. Returns zero on success or
972 * -errno on failure.
973 */
974int dw_dma_cyclic_start(struct dma_chan *chan)
975{
976 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
977 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
978
979 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
980 dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
981 return -ENODEV;
982 }
983
984 spin_lock(&dwc->lock);
985
986 /* assert channel is idle */
987 if (dma_readl(dw, CH_EN) & dwc->mask) {
988 dev_err(chan2dev(&dwc->chan),
989 "BUG: Attempted to start non-idle channel\n");
990 dev_err(chan2dev(&dwc->chan),
991 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
992 channel_readl(dwc, SAR),
993 channel_readl(dwc, DAR),
994 channel_readl(dwc, LLP),
995 channel_readl(dwc, CTL_HI),
996 channel_readl(dwc, CTL_LO));
997 spin_unlock(&dwc->lock);
998 return -EBUSY;
999 }
1000
1001 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
1002 dma_writel(dw, CLEAR.ERROR, dwc->mask);
1003 dma_writel(dw, CLEAR.XFER, dwc->mask);
1004
1005 /* setup DMAC channel registers */
1006 channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
1007 channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
1008 channel_writel(dwc, CTL_HI, 0);
1009
1010 channel_set_bit(dw, CH_EN, dwc->mask);
1011
1012 spin_unlock(&dwc->lock);
1013
1014 return 0;
1015}
1016EXPORT_SYMBOL(dw_dma_cyclic_start);
1017
1018/**
1019 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
1020 * @chan: the DMA channel to stop
1021 *
1022 * Must be called with soft interrupts disabled.
1023 */
1024void dw_dma_cyclic_stop(struct dma_chan *chan)
1025{
1026 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1027 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
1028
1029 spin_lock(&dwc->lock);
1030
1031 channel_clear_bit(dw, CH_EN, dwc->mask);
1032 while (dma_readl(dw, CH_EN) & dwc->mask)
1033 cpu_relax();
1034
1035 spin_unlock(&dwc->lock);
1036}
1037EXPORT_SYMBOL(dw_dma_cyclic_stop);
1038
1039/**
1040 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
1041 * @chan: the DMA channel to prepare
1042 * @buf_addr: physical DMA address where the buffer starts
1043 * @buf_len: total number of bytes for the entire buffer
1044 * @period_len: number of bytes for each period
1045 * @direction: transfer direction, to or from device
1046 *
1047 * Must be called before trying to start the transfer. Returns a valid struct
1048 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
1049 */
1050struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1051 dma_addr_t buf_addr, size_t buf_len, size_t period_len,
1052 enum dma_data_direction direction)
1053{
1054 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1055 struct dw_cyclic_desc *cdesc;
1056 struct dw_cyclic_desc *retval = NULL;
1057 struct dw_desc *desc;
1058 struct dw_desc *last = NULL;
1059 struct dw_dma_slave *dws = chan->private;
1060 unsigned long was_cyclic;
1061 unsigned int reg_width;
1062 unsigned int periods;
1063 unsigned int i;
1064
1065 spin_lock_bh(&dwc->lock);
1066 if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
1067 spin_unlock_bh(&dwc->lock);
1068 dev_dbg(chan2dev(&dwc->chan),
1069 "queue and/or active list are not empty\n");
1070 return ERR_PTR(-EBUSY);
1071 }
1072
1073 was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1074 spin_unlock_bh(&dwc->lock);
1075 if (was_cyclic) {
1076 dev_dbg(chan2dev(&dwc->chan),
1077 "channel already prepared for cyclic DMA\n");
1078 return ERR_PTR(-EBUSY);
1079 }
1080
1081 retval = ERR_PTR(-EINVAL);
1082 reg_width = dws->reg_width;
1083 periods = buf_len / period_len;
1084
1085 /* Check for too big/unaligned periods and unaligned DMA buffer. */
1086 if (period_len > (DWC_MAX_COUNT << reg_width))
1087 goto out_err;
1088 if (unlikely(period_len & ((1 << reg_width) - 1)))
1089 goto out_err;
1090 if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1091 goto out_err;
1092 if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
1093 goto out_err;
1094
1095 retval = ERR_PTR(-ENOMEM);
1096
1097 if (periods > NR_DESCS_PER_CHANNEL)
1098 goto out_err;
1099
1100 cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
1101 if (!cdesc)
1102 goto out_err;
1103
1104 cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
1105 if (!cdesc->desc)
1106 goto out_err_alloc;
1107
1108 for (i = 0; i < periods; i++) {
1109 desc = dwc_desc_get(dwc);
1110 if (!desc)
1111 goto out_err_desc_get;
1112
1113 switch (direction) {
1114 case DMA_TO_DEVICE:
1115 desc->lli.dar = dws->tx_reg;
1116 desc->lli.sar = buf_addr + (period_len * i);
1117 desc->lli.ctllo = (DWC_DEFAULT_CTLLO
1118 | DWC_CTLL_DST_WIDTH(reg_width)
1119 | DWC_CTLL_SRC_WIDTH(reg_width)
1120 | DWC_CTLL_DST_FIX
1121 | DWC_CTLL_SRC_INC
1122 | DWC_CTLL_FC_M2P
1123 | DWC_CTLL_INT_EN);
1124 break;
1125 case DMA_FROM_DEVICE:
1126 desc->lli.dar = buf_addr + (period_len * i);
1127 desc->lli.sar = dws->rx_reg;
1128 desc->lli.ctllo = (DWC_DEFAULT_CTLLO
1129 | DWC_CTLL_SRC_WIDTH(reg_width)
1130 | DWC_CTLL_DST_WIDTH(reg_width)
1131 | DWC_CTLL_DST_INC
1132 | DWC_CTLL_SRC_FIX
1133 | DWC_CTLL_FC_P2M
1134 | DWC_CTLL_INT_EN);
1135 break;
1136 default:
1137 break;
1138 }
1139
1140 desc->lli.ctlhi = (period_len >> reg_width);
1141 cdesc->desc[i] = desc;
1142
1143 if (last) {
1144 last->lli.llp = desc->txd.phys;
1145 dma_sync_single_for_device(chan2parent(chan),
1146 last->txd.phys, sizeof(last->lli),
1147 DMA_TO_DEVICE);
1148 }
1149
1150 last = desc;
1151 }
1152
1153 /* lets make a cyclic list */
1154 last->lli.llp = cdesc->desc[0]->txd.phys;
1155 dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
1156 sizeof(last->lli), DMA_TO_DEVICE);
1157
1158 dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
1159 "period %zu periods %d\n", buf_addr, buf_len,
1160 period_len, periods);
1161
1162 cdesc->periods = periods;
1163 dwc->cdesc = cdesc;
1164
1165 return cdesc;
1166
1167out_err_desc_get:
1168 while (i--)
1169 dwc_desc_put(dwc, cdesc->desc[i]);
1170out_err_alloc:
1171 kfree(cdesc);
1172out_err:
1173 clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1174 return (struct dw_cyclic_desc *)retval;
1175}
1176EXPORT_SYMBOL(dw_dma_cyclic_prep);
1177
1178/**
1179 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
1180 * @chan: the DMA channel to free
1181 */
1182void dw_dma_cyclic_free(struct dma_chan *chan)
1183{
1184 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1185 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
1186 struct dw_cyclic_desc *cdesc = dwc->cdesc;
1187 int i;
1188
1189 dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
1190
1191 if (!cdesc)
1192 return;
1193
1194 spin_lock_bh(&dwc->lock);
1195
1196 channel_clear_bit(dw, CH_EN, dwc->mask);
1197 while (dma_readl(dw, CH_EN) & dwc->mask)
1198 cpu_relax();
1199
1200 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
1201 dma_writel(dw, CLEAR.ERROR, dwc->mask);
1202 dma_writel(dw, CLEAR.XFER, dwc->mask);
1203
1204 spin_unlock_bh(&dwc->lock);
1205
1206 for (i = 0; i < cdesc->periods; i++)
1207 dwc_desc_put(dwc, cdesc->desc[i]);
1208
1209 kfree(cdesc->desc);
1210 kfree(cdesc);
1211
1212 clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1213}
1214EXPORT_SYMBOL(dw_dma_cyclic_free);
1215
887/*----------------------------------------------------------------------*/ 1216/*----------------------------------------------------------------------*/
888 1217
889static void dw_dma_off(struct dw_dma *dw) 1218static void dw_dma_off(struct dw_dma *dw)
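The kernel-doc above describes the new cyclic API (dw_dma_cyclic_prep/start/stop/free) exported for clients, such as audio drivers, that stream through a ring buffer split into periods with a callback at each period boundary. A minimal usage sketch, assuming the channel was requested as a slave channel with a struct dw_dma_slave attached via chan->private; error handling is trimmed and the callback, context, and buffer names are hypothetical:

	struct dw_cyclic_desc *cdesc;
	dma_addr_t pos;

	/* split the DMA-able ring buffer into buf_len / period_len periods */
	cdesc = dw_dma_cyclic_prep(chan, buf_addr, buf_len, period_len,
				   DMA_TO_DEVICE);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	/* invoked from the block-complete interrupt once per period */
	cdesc->period_callback = my_period_callback;
	cdesc->period_callback_param = my_ctx;

	dw_dma_cyclic_start(chan);		/* soft interrupts disabled */

	/* ... stream data; the hardware read position can be tracked ... */
	pos = dw_dma_get_src_addr(chan) - buf_addr;

	dw_dma_cyclic_stop(chan);
	dw_dma_cyclic_free(chan);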
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index b252b202c5cf..13a580767031 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -126,6 +126,10 @@ struct dw_dma_regs {
 
 #define DW_REGLEN		0x400
 
+enum dw_dmac_flags {
+	DW_DMA_IS_CYCLIC = 0,
+};
+
 struct dw_dma_chan {
 	struct dma_chan		chan;
 	void __iomem		*ch_regs;
@@ -134,10 +138,12 @@ struct dw_dma_chan {
 	spinlock_t		lock;
 
 	/* these other elements are all protected by lock */
+	unsigned long		flags;
 	dma_cookie_t		completed;
 	struct list_head	active_list;
 	struct list_head	queue;
 	struct list_head	free_list;
+	struct dw_cyclic_desc	*cdesc;
 
 	unsigned int		descs_allocated;
 };
@@ -158,7 +164,6 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
 	return container_of(chan, struct dw_dma_chan, chan);
 }
 
-
 struct dw_dma {
 	struct dma_device	dma;
 	void __iomem		*regs;
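The new cdesc member points at the cyclic descriptor set managed by the API added in dw_dmac.c above. struct dw_cyclic_desc itself is declared in include/linux/dw_dmac.h (that hunk is not shown on this page); its layout, inferred from the uses above, is roughly:

/* inferred sketch -- the authoritative definition is in include/linux/dw_dmac.h */
struct dw_cyclic_desc {
	struct dw_desc	**desc;				/* one descriptor per period */
	unsigned long	periods;
	void		(*period_callback)(void *param);
	void		*period_callback_param;
};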
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 86d6da47f558..da8a8ed9e411 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -354,7 +354,6 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
 		dma_async_tx_descriptor_init(&desc_sw->async_tx,
 						&fsl_chan->common);
 		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
-		INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
 		desc_sw->async_tx.phys = pdesc;
 	}
 
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 5905cd36bcd2..e4fc33c1c32f 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -693,7 +693,6 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
 		desc_sw->async_tx.tx_submit = ioat2_tx_submit;
 		break;
 	}
-	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
 
 	desc_sw->hw = desc;
 	desc_sw->async_tx.phys = phys;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 16adbe61cfb2..2f052265122f 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -498,7 +498,6 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 		slot->async_tx.tx_submit = iop_adma_tx_submit;
 		INIT_LIST_HEAD(&slot->chain_node);
 		INIT_LIST_HEAD(&slot->slot_node);
-		INIT_LIST_HEAD(&slot->async_tx.tx_list);
 		hw_desc = (char *) iop_chan->device->dma_desc_pool;
 		slot->async_tx.phys =
 			(dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index da781d107895..e202a6ce5573 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -28,6 +28,9 @@
28#define FS_VF_IN_VALID 0x00000002 28#define FS_VF_IN_VALID 0x00000002
29#define FS_ENC_IN_VALID 0x00000001 29#define FS_ENC_IN_VALID 0x00000001
30 30
31static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
32 bool wait_for_stop);
33
31/* 34/*
32 * There can be only one, we could allocate it dynamically, but then we'd have 35 * There can be only one, we could allocate it dynamically, but then we'd have
33 * to add an extra parameter to some functions, and use something as ugly as 36 * to add an extra parameter to some functions, and use something as ugly as
@@ -107,7 +110,7 @@ static uint32_t bytes_per_pixel(enum pixel_fmt fmt)
107 } 110 }
108} 111}
109 112
110/* Enable / disable direct write to memory by the Camera Sensor Interface */ 113/* Enable direct write to memory by the Camera Sensor Interface */
111static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel) 114static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
112{ 115{
113 uint32_t ic_conf, mask; 116 uint32_t ic_conf, mask;
@@ -126,6 +129,7 @@ static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
126 idmac_write_icreg(ipu, ic_conf, IC_CONF); 129 idmac_write_icreg(ipu, ic_conf, IC_CONF);
127} 130}
128 131
132/* Called under spin_lock_irqsave(&ipu_data.lock) */
129static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel) 133static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel)
130{ 134{
131 uint32_t ic_conf, mask; 135 uint32_t ic_conf, mask;
@@ -422,7 +426,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
422 break; 426 break;
423 default: 427 default:
424 dev_err(ipu_data.dev, 428 dev_err(ipu_data.dev,
425 "mxc ipu: unimplemented pixel format %d\n", pixel_fmt); 429 "mx3 ipu: unimplemented pixel format %d\n", pixel_fmt);
426 break; 430 break;
427 } 431 }
428 432
@@ -433,20 +437,20 @@ static void ipu_ch_param_set_burst_size(union chan_param_mem *params,
433 uint16_t burst_pixels) 437 uint16_t burst_pixels)
434{ 438{
435 params->pp.npb = burst_pixels - 1; 439 params->pp.npb = burst_pixels - 1;
436}; 440}
437 441
438static void ipu_ch_param_set_buffer(union chan_param_mem *params, 442static void ipu_ch_param_set_buffer(union chan_param_mem *params,
439 dma_addr_t buf0, dma_addr_t buf1) 443 dma_addr_t buf0, dma_addr_t buf1)
440{ 444{
441 params->pp.eba0 = buf0; 445 params->pp.eba0 = buf0;
442 params->pp.eba1 = buf1; 446 params->pp.eba1 = buf1;
443}; 447}
444 448
445static void ipu_ch_param_set_rotation(union chan_param_mem *params, 449static void ipu_ch_param_set_rotation(union chan_param_mem *params,
446 enum ipu_rotate_mode rotate) 450 enum ipu_rotate_mode rotate)
447{ 451{
448 params->pp.bam = rotate; 452 params->pp.bam = rotate;
449}; 453}
450 454
451static void ipu_write_param_mem(uint32_t addr, uint32_t *data, 455static void ipu_write_param_mem(uint32_t addr, uint32_t *data,
452 uint32_t num_words) 456 uint32_t num_words)
@@ -571,7 +575,7 @@ static uint32_t dma_param_addr(uint32_t dma_ch)
571{ 575{
572 /* Channel Parameter Memory */ 576 /* Channel Parameter Memory */
573 return 0x10000 | (dma_ch << 4); 577 return 0x10000 | (dma_ch << 4);
574}; 578}
575 579
576static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel, 580static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel,
577 bool prio) 581 bool prio)
@@ -611,7 +615,8 @@ static uint32_t ipu_channel_conf_mask(enum ipu_channel channel)
611 615
612/** 616/**
613 * ipu_enable_channel() - enable an IPU channel. 617 * ipu_enable_channel() - enable an IPU channel.
614 * @channel: channel ID. 618 * @idmac: IPU DMAC context.
619 * @ichan: IDMAC channel.
615 * @return: 0 on success or negative error code on failure. 620 * @return: 0 on success or negative error code on failure.
616 */ 621 */
617static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan) 622static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
@@ -649,7 +654,7 @@ static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
649 654
650/** 655/**
651 * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel. 656 * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel.
652 * @channel: channel ID. 657 * @ichan: IDMAC channel.
653 * @pixel_fmt: pixel format of buffer. Pixel format is a FOURCC ASCII code. 658 * @pixel_fmt: pixel format of buffer. Pixel format is a FOURCC ASCII code.
654 * @width: width of buffer in pixels. 659 * @width: width of buffer in pixels.
655 * @height: height of buffer in pixels. 660 * @height: height of buffer in pixels.
@@ -687,7 +692,7 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan,
687 } 692 }
688 693
689 /* IC channel's stride must be a multiple of 8 pixels */ 694 /* IC channel's stride must be a multiple of 8 pixels */
690 if ((channel <= 13) && (stride % 8)) { 695 if ((channel <= IDMAC_IC_13) && (stride % 8)) {
691 dev_err(ipu->dev, "Stride must be 8 pixel multiple\n"); 696 dev_err(ipu->dev, "Stride must be 8 pixel multiple\n");
692 return -EINVAL; 697 return -EINVAL;
693 } 698 }
@@ -752,7 +757,7 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
752 757
753/** 758/**
754 * ipu_update_channel_buffer() - update physical address of a channel buffer. 759 * ipu_update_channel_buffer() - update physical address of a channel buffer.
755 * @channel: channel ID. 760 * @ichan: IDMAC channel.
756 * @buffer_n: buffer number to update. 761 * @buffer_n: buffer number to update.
757 * 0 or 1 are the only valid values. 762 * 0 or 1 are the only valid values.
758 * @phyaddr: buffer physical address. 763 * @phyaddr: buffer physical address.
@@ -760,9 +765,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
760 * function will fail if the buffer is set to ready. 765 * function will fail if the buffer is set to ready.
761 */ 766 */
762/* Called under spin_lock(_irqsave)(&ichan->lock) */ 767/* Called under spin_lock(_irqsave)(&ichan->lock) */
763static int ipu_update_channel_buffer(enum ipu_channel channel, 768static int ipu_update_channel_buffer(struct idmac_channel *ichan,
764 int buffer_n, dma_addr_t phyaddr) 769 int buffer_n, dma_addr_t phyaddr)
765{ 770{
771 enum ipu_channel channel = ichan->dma_chan.chan_id;
766 uint32_t reg; 772 uint32_t reg;
767 unsigned long flags; 773 unsigned long flags;
768 774
@@ -771,8 +777,8 @@ static int ipu_update_channel_buffer(enum ipu_channel channel,
771 if (buffer_n == 0) { 777 if (buffer_n == 0) {
772 reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY); 778 reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
773 if (reg & (1UL << channel)) { 779 if (reg & (1UL << channel)) {
774 spin_unlock_irqrestore(&ipu_data.lock, flags); 780 ipu_ic_disable_task(&ipu_data, channel);
775 return -EACCES; 781 ichan->status = IPU_CHANNEL_READY;
776 } 782 }
777 783
778 /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */ 784 /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */
@@ -782,8 +788,8 @@ static int ipu_update_channel_buffer(enum ipu_channel channel,
782 } else { 788 } else {
783 reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY); 789 reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
784 if (reg & (1UL << channel)) { 790 if (reg & (1UL << channel)) {
785 spin_unlock_irqrestore(&ipu_data.lock, flags); 791 ipu_ic_disable_task(&ipu_data, channel);
786 return -EACCES; 792 ichan->status = IPU_CHANNEL_READY;
787 } 793 }
788 794
789 /* Check if double-buffering is already enabled */ 795 /* Check if double-buffering is already enabled */
@@ -805,6 +811,39 @@ static int ipu_update_channel_buffer(enum ipu_channel channel,
805} 811}
806 812
807/* Called under spin_lock_irqsave(&ichan->lock) */ 813/* Called under spin_lock_irqsave(&ichan->lock) */
814static int ipu_submit_buffer(struct idmac_channel *ichan,
815 struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx)
816{
817 unsigned int chan_id = ichan->dma_chan.chan_id;
818 struct device *dev = &ichan->dma_chan.dev->device;
819 int ret;
820
821 if (async_tx_test_ack(&desc->txd))
822 return -EINTR;
823
824 /*
825 * On first invocation this shouldn't be necessary, the call to
826 * ipu_init_channel_buffer() above will set addresses for us, so we
827 * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
828 * doing it again shouldn't hurt either.
829 */
830 ret = ipu_update_channel_buffer(ichan, buf_idx,
831 sg_dma_address(sg));
832
833 if (ret < 0) {
834 dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
835 sg, chan_id, buf_idx);
836 return ret;
837 }
838
839 ipu_select_buffer(chan_id, buf_idx);
840 dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
841 sg, chan_id, buf_idx);
842
843 return 0;
844}
845
846/* Called under spin_lock_irqsave(&ichan->lock) */
808static int ipu_submit_channel_buffers(struct idmac_channel *ichan, 847static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
809 struct idmac_tx_desc *desc) 848 struct idmac_tx_desc *desc)
810{ 849{
@@ -815,20 +854,10 @@ static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
815 if (!ichan->sg[i]) { 854 if (!ichan->sg[i]) {
816 ichan->sg[i] = sg; 855 ichan->sg[i] = sg;
817 856
818 /* 857 ret = ipu_submit_buffer(ichan, desc, sg, i);
819 * On first invocation this shouldn't be necessary, the
820 * call to ipu_init_channel_buffer() above will set
821 * addresses for us, so we could make it conditional
822 * on status >= IPU_CHANNEL_ENABLED, but doing it again
823 * shouldn't hurt either.
824 */
825 ret = ipu_update_channel_buffer(ichan->dma_chan.chan_id, i,
826 sg_dma_address(sg));
827 if (ret < 0) 858 if (ret < 0)
828 return ret; 859 return ret;
829 860
830 ipu_select_buffer(ichan->dma_chan.chan_id, i);
831
832 sg = sg_next(sg); 861 sg = sg_next(sg);
833 } 862 }
834 } 863 }
@@ -842,19 +871,22 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
842 struct idmac_channel *ichan = to_idmac_chan(tx->chan); 871 struct idmac_channel *ichan = to_idmac_chan(tx->chan);
843 struct idmac *idmac = to_idmac(tx->chan->device); 872 struct idmac *idmac = to_idmac(tx->chan->device);
844 struct ipu *ipu = to_ipu(idmac); 873 struct ipu *ipu = to_ipu(idmac);
874 struct device *dev = &ichan->dma_chan.dev->device;
845 dma_cookie_t cookie; 875 dma_cookie_t cookie;
846 unsigned long flags; 876 unsigned long flags;
877 int ret;
847 878
848 /* Sanity check */ 879 /* Sanity check */
849 if (!list_empty(&desc->list)) { 880 if (!list_empty(&desc->list)) {
850 /* The descriptor doesn't belong to client */ 881 /* The descriptor doesn't belong to client */
851 dev_err(&ichan->dma_chan.dev->device, 882 dev_err(dev, "Descriptor %p not prepared!\n", tx);
852 "Descriptor %p not prepared!\n", tx);
853 return -EBUSY; 883 return -EBUSY;
854 } 884 }
855 885
856 mutex_lock(&ichan->chan_mutex); 886 mutex_lock(&ichan->chan_mutex);
857 887
888 async_tx_clear_ack(tx);
889
858 if (ichan->status < IPU_CHANNEL_READY) { 890 if (ichan->status < IPU_CHANNEL_READY) {
859 struct idmac_video_param *video = &ichan->params.video; 891 struct idmac_video_param *video = &ichan->params.video;
860 /* 892 /*
@@ -878,16 +910,7 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
878 goto out; 910 goto out;
879 } 911 }
880 912
881 /* ipu->lock can be taken under ichan->lock, but not v.v. */ 913 dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]);
882 spin_lock_irqsave(&ichan->lock, flags);
883
884 /* submit_buffers() atomically verifies and fills empty sg slots */
885 cookie = ipu_submit_channel_buffers(ichan, desc);
886
887 spin_unlock_irqrestore(&ichan->lock, flags);
888
889 if (cookie < 0)
890 goto out;
891 914
892 cookie = ichan->dma_chan.cookie; 915 cookie = ichan->dma_chan.cookie;
893 916
@@ -897,24 +920,40 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
897 /* from dmaengine.h: "last cookie value returned to client" */ 920 /* from dmaengine.h: "last cookie value returned to client" */
898 ichan->dma_chan.cookie = cookie; 921 ichan->dma_chan.cookie = cookie;
899 tx->cookie = cookie; 922 tx->cookie = cookie;
923
924 /* ipu->lock can be taken under ichan->lock, but not v.v. */
900 spin_lock_irqsave(&ichan->lock, flags); 925 spin_lock_irqsave(&ichan->lock, flags);
926
901 list_add_tail(&desc->list, &ichan->queue); 927 list_add_tail(&desc->list, &ichan->queue);
928 /* submit_buffers() atomically verifies and fills empty sg slots */
929 ret = ipu_submit_channel_buffers(ichan, desc);
930
902 spin_unlock_irqrestore(&ichan->lock, flags); 931 spin_unlock_irqrestore(&ichan->lock, flags);
903 932
933 if (ret < 0) {
934 cookie = ret;
935 goto dequeue;
936 }
937
904 if (ichan->status < IPU_CHANNEL_ENABLED) { 938 if (ichan->status < IPU_CHANNEL_ENABLED) {
905 int ret = ipu_enable_channel(idmac, ichan); 939 ret = ipu_enable_channel(idmac, ichan);
906 if (ret < 0) { 940 if (ret < 0) {
907 cookie = ret; 941 cookie = ret;
908 spin_lock_irqsave(&ichan->lock, flags); 942 goto dequeue;
909 list_del_init(&desc->list);
910 spin_unlock_irqrestore(&ichan->lock, flags);
911 tx->cookie = cookie;
912 ichan->dma_chan.cookie = cookie;
913 } 943 }
914 } 944 }
915 945
916 dump_idmac_reg(ipu); 946 dump_idmac_reg(ipu);
917 947
948dequeue:
949 if (cookie < 0) {
950 spin_lock_irqsave(&ichan->lock, flags);
951 list_del_init(&desc->list);
952 spin_unlock_irqrestore(&ichan->lock, flags);
953 tx->cookie = cookie;
954 ichan->dma_chan.cookie = cookie;
955 }
956
918out: 957out:
919 mutex_unlock(&ichan->chan_mutex); 958 mutex_unlock(&ichan->chan_mutex);
920 959
@@ -944,8 +983,6 @@ static int idmac_desc_alloc(struct idmac_channel *ichan, int n)
944 memset(txd, 0, sizeof(*txd)); 983 memset(txd, 0, sizeof(*txd));
945 dma_async_tx_descriptor_init(txd, &ichan->dma_chan); 984 dma_async_tx_descriptor_init(txd, &ichan->dma_chan);
946 txd->tx_submit = idmac_tx_submit; 985 txd->tx_submit = idmac_tx_submit;
947 txd->chan = &ichan->dma_chan;
948 INIT_LIST_HEAD(&txd->tx_list);
949 986
950 list_add(&desc->list, &ichan->free_list); 987 list_add(&desc->list, &ichan->free_list);
951 988
@@ -1161,6 +1198,24 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
1161 return 0; 1198 return 0;
1162} 1199}
1163 1200
1201static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan,
1202 struct idmac_tx_desc **desc, struct scatterlist *sg)
1203{
1204 struct scatterlist *sgnew = sg ? sg_next(sg) : NULL;
1205
1206 if (sgnew)
1207 /* next sg-element in this list */
1208 return sgnew;
1209
1210 if ((*desc)->list.next == &ichan->queue)
1211 /* No more descriptors on the queue */
1212 return NULL;
1213
1214 /* Fetch next descriptor */
1215 *desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list);
1216 return (*desc)->sg;
1217}
1218
1164/* 1219/*
1165 * We have several possibilities here: 1220 * We have several possibilities here:
1166 * current BUF next BUF 1221 * current BUF next BUF
@@ -1176,23 +1231,46 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
1176static irqreturn_t idmac_interrupt(int irq, void *dev_id) 1231static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1177{ 1232{
1178 struct idmac_channel *ichan = dev_id; 1233 struct idmac_channel *ichan = dev_id;
1234 struct device *dev = &ichan->dma_chan.dev->device;
1179 unsigned int chan_id = ichan->dma_chan.chan_id; 1235 unsigned int chan_id = ichan->dma_chan.chan_id;
1180 struct scatterlist **sg, *sgnext, *sgnew = NULL; 1236 struct scatterlist **sg, *sgnext, *sgnew = NULL;
1181 /* Next transfer descriptor */ 1237 /* Next transfer descriptor */
1182 struct idmac_tx_desc *desc = NULL, *descnew; 1238 struct idmac_tx_desc *desc, *descnew;
1183 dma_async_tx_callback callback; 1239 dma_async_tx_callback callback;
1184 void *callback_param; 1240 void *callback_param;
1185 bool done = false; 1241 bool done = false;
1186 u32 ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY), 1242 u32 ready0, ready1, curbuf, err;
1187 ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY), 1243 unsigned long flags;
1188 curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
1189 1244
1190 /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */ 1245 /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
1191 1246
1192 pr_debug("IDMAC irq %d\n", irq); 1247 dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer);
1248
1249 spin_lock_irqsave(&ipu_data.lock, flags);
1250
1251 ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
1252 ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
1253 curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
1254 err = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4);
1255
1256 if (err & (1 << chan_id)) {
1257 idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4);
1258 spin_unlock_irqrestore(&ipu_data.lock, flags);
1259 /*
1260 * Doing this
1261 * ichan->sg[0] = ichan->sg[1] = NULL;
1262 * you can force channel re-enable on the next tx_submit(), but
1263 * this is dirty - think about descriptors with multiple
1264 * sg elements.
1265 */
1266 dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n",
1267 chan_id, ready0, ready1, curbuf);
1268 return IRQ_HANDLED;
1269 }
1270 spin_unlock_irqrestore(&ipu_data.lock, flags);
1271
1193 /* Other interrupts do not interfere with this channel */ 1272 /* Other interrupts do not interfere with this channel */
1194 spin_lock(&ichan->lock); 1273 spin_lock(&ichan->lock);
1195
1196 if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 && 1274 if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
1197 ((curbuf >> chan_id) & 1) == ichan->active_buffer)) { 1275 ((curbuf >> chan_id) & 1) == ichan->active_buffer)) {
1198 int i = 100; 1276 int i = 100;
@@ -1207,19 +1285,23 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1207 1285
1208 if (!i) { 1286 if (!i) {
1209 spin_unlock(&ichan->lock); 1287 spin_unlock(&ichan->lock);
1210 dev_dbg(ichan->dma_chan.device->dev, 1288 dev_dbg(dev,
1211 "IRQ on active buffer on channel %x, active " 1289 "IRQ on active buffer on channel %x, active "
1212 "%d, ready %x, %x, current %x!\n", chan_id, 1290 "%d, ready %x, %x, current %x!\n", chan_id,
1213 ichan->active_buffer, ready0, ready1, curbuf); 1291 ichan->active_buffer, ready0, ready1, curbuf);
1214 return IRQ_NONE; 1292 return IRQ_NONE;
1215 } 1293 } else
1294 dev_dbg(dev,
1295 "Buffer deactivated on channel %x, active "
1296 "%d, ready %x, %x, current %x, rest %d!\n", chan_id,
1297 ichan->active_buffer, ready0, ready1, curbuf, i);
1216 } 1298 }
1217 1299
1218 if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || 1300 if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
1219 (!ichan->active_buffer && (ready0 >> chan_id) & 1) 1301 (!ichan->active_buffer && (ready0 >> chan_id) & 1)
1220 )) { 1302 )) {
1221 spin_unlock(&ichan->lock); 1303 spin_unlock(&ichan->lock);
1222 dev_dbg(ichan->dma_chan.device->dev, 1304 dev_dbg(dev,
1223 "IRQ with active buffer still ready on channel %x, " 1305 "IRQ with active buffer still ready on channel %x, "
1224 "active %d, ready %x, %x!\n", chan_id, 1306 "active %d, ready %x, %x!\n", chan_id,
1225 ichan->active_buffer, ready0, ready1); 1307 ichan->active_buffer, ready0, ready1);
@@ -1227,8 +1309,9 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1227 } 1309 }
1228 1310
1229 if (unlikely(list_empty(&ichan->queue))) { 1311 if (unlikely(list_empty(&ichan->queue))) {
1312 ichan->sg[ichan->active_buffer] = NULL;
1230 spin_unlock(&ichan->lock); 1313 spin_unlock(&ichan->lock);
1231 dev_err(ichan->dma_chan.device->dev, 1314 dev_err(dev,
1232 "IRQ without queued buffers on channel %x, active %d, " 1315 "IRQ without queued buffers on channel %x, active %d, "
1233 "ready %x, %x!\n", chan_id, 1316 "ready %x, %x!\n", chan_id,
1234 ichan->active_buffer, ready0, ready1); 1317 ichan->active_buffer, ready0, ready1);
@@ -1243,40 +1326,44 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1243 sg = &ichan->sg[ichan->active_buffer]; 1326 sg = &ichan->sg[ichan->active_buffer];
1244 sgnext = ichan->sg[!ichan->active_buffer]; 1327 sgnext = ichan->sg[!ichan->active_buffer];
1245 1328
1329 if (!*sg) {
1330 spin_unlock(&ichan->lock);
1331 return IRQ_HANDLED;
1332 }
1333
1334 desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
1335 descnew = desc;
1336
1337 dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n",
1338 irq, sg_dma_address(*sg), sgnext ? sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf);
1339
1340 /* Find the descriptor of sgnext */
1341 sgnew = idmac_sg_next(ichan, &descnew, *sg);
1342 if (sgnext != sgnew)
1343 dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew);
1344
1246 /* 1345 /*
1247 * if sgnext == NULL sg must be the last element in a scatterlist and 1346 * if sgnext == NULL sg must be the last element in a scatterlist and
1248 * queue must be empty 1347 * queue must be empty
1249 */ 1348 */
1250 if (unlikely(!sgnext)) { 1349 if (unlikely(!sgnext)) {
1251 if (unlikely(sg_next(*sg))) { 1350 if (!WARN_ON(sg_next(*sg)))
1252 dev_err(ichan->dma_chan.device->dev, 1351 dev_dbg(dev, "Underrun on channel %x\n", chan_id);
1253 "Broken buffer-update locking on channel %x!\n", 1352 ichan->sg[!ichan->active_buffer] = sgnew;
1254 chan_id); 1353
1255 /* We'll let the user catch up */ 1354 if (unlikely(sgnew)) {
1355 ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer);
1256 } else { 1356 } else {
1257 /* Underrun */ 1357 spin_lock_irqsave(&ipu_data.lock, flags);
1258 ipu_ic_disable_task(&ipu_data, chan_id); 1358 ipu_ic_disable_task(&ipu_data, chan_id);
1259 dev_dbg(ichan->dma_chan.device->dev, 1359 spin_unlock_irqrestore(&ipu_data.lock, flags);
1260 "Underrun on channel %x\n", chan_id);
1261 ichan->status = IPU_CHANNEL_READY; 1360 ichan->status = IPU_CHANNEL_READY;
1262 /* Continue to check for complete descriptor */ 1361 /* Continue to check for complete descriptor */
1263 } 1362 }
1264 } 1363 }
1265 1364
1266 desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); 1365 /* Calculate and submit the next sg element */
1267 1366 sgnew = idmac_sg_next(ichan, &descnew, sgnew);
1268 /* First calculate and submit the next sg element */
1269 if (likely(sgnext))
1270 sgnew = sg_next(sgnext);
1271
1272 if (unlikely(!sgnew)) {
1273 /* Start a new scatterlist, if any queued */
1274 if (likely(desc->list.next != &ichan->queue)) {
1275 descnew = list_entry(desc->list.next,
1276 struct idmac_tx_desc, list);
1277 sgnew = &descnew->sg[0];
1278 }
1279 }
1280 1367
1281 if (unlikely(!sg_next(*sg)) || !sgnext) { 1368 if (unlikely(!sg_next(*sg)) || !sgnext) {
1282 /* 1369 /*
@@ -1289,17 +1376,13 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1289 1376
1290 *sg = sgnew; 1377 *sg = sgnew;
1291 1378
1292 if (likely(sgnew)) { 1379 if (likely(sgnew) &&
1293 int ret; 1380 ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
1294 1381 callback = desc->txd.callback;
1295 ret = ipu_update_channel_buffer(chan_id, ichan->active_buffer, 1382 callback_param = desc->txd.callback_param;
1296 sg_dma_address(*sg)); 1383 spin_unlock(&ichan->lock);
1297 if (ret < 0) 1384 callback(callback_param);
1298 dev_err(ichan->dma_chan.device->dev, 1385 spin_lock(&ichan->lock);
1299 "Failed to update buffer on channel %x buffer %d!\n",
1300 chan_id, ichan->active_buffer);
1301 else
1302 ipu_select_buffer(chan_id, ichan->active_buffer);
1303 } 1386 }
1304 1387
1305 /* Flip the active buffer - even if update above failed */ 1388 /* Flip the active buffer - even if update above failed */
@@ -1327,13 +1410,20 @@ static void ipu_gc_tasklet(unsigned long arg)
1327 struct idmac_channel *ichan = ipu->channel + i; 1410 struct idmac_channel *ichan = ipu->channel + i;
1328 struct idmac_tx_desc *desc; 1411 struct idmac_tx_desc *desc;
1329 unsigned long flags; 1412 unsigned long flags;
1330 int j; 1413 struct scatterlist *sg;
1414 int j, k;
1331 1415
1332 for (j = 0; j < ichan->n_tx_desc; j++) { 1416 for (j = 0; j < ichan->n_tx_desc; j++) {
1333 desc = ichan->desc + j; 1417 desc = ichan->desc + j;
1334 spin_lock_irqsave(&ichan->lock, flags); 1418 spin_lock_irqsave(&ichan->lock, flags);
1335 if (async_tx_test_ack(&desc->txd)) { 1419 if (async_tx_test_ack(&desc->txd)) {
1336 list_move(&desc->list, &ichan->free_list); 1420 list_move(&desc->list, &ichan->free_list);
1421 for_each_sg(desc->sg, sg, desc->sg_len, k) {
1422 if (ichan->sg[0] == sg)
1423 ichan->sg[0] = NULL;
1424 else if (ichan->sg[1] == sg)
1425 ichan->sg[1] = NULL;
1426 }
1337 async_tx_clear_ack(&desc->txd); 1427 async_tx_clear_ack(&desc->txd);
1338 } 1428 }
1339 spin_unlock_irqrestore(&ichan->lock, flags); 1429 spin_unlock_irqrestore(&ichan->lock, flags);
@@ -1341,13 +1431,7 @@ static void ipu_gc_tasklet(unsigned long arg)
1341 } 1431 }
1342} 1432}
1343 1433
1344/* 1434/* Allocate and initialise a transfer descriptor. */
1345 * At the time .device_alloc_chan_resources() method is called, we cannot know,
1346 * whether the client will accept the channel. Thus we must only check, if we
1347 * can satisfy client's request but the only real criterion to verify, whether
1348 * the client has accepted our offer is the client_count. That's why we have to
1349 * perform the rest of our allocation tasks on the first call to this function.
1350 */
1351static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, 1435static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
1352 struct scatterlist *sgl, unsigned int sg_len, 1436 struct scatterlist *sgl, unsigned int sg_len,
1353 enum dma_data_direction direction, unsigned long tx_flags) 1437 enum dma_data_direction direction, unsigned long tx_flags)
@@ -1358,8 +1442,8 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan
1358 unsigned long flags; 1442 unsigned long flags;
1359 1443
1360 /* We only can handle these three channels so far */ 1444 /* We only can handle these three channels so far */
1361 if (ichan->dma_chan.chan_id != IDMAC_SDC_0 && ichan->dma_chan.chan_id != IDMAC_SDC_1 && 1445 if (chan->chan_id != IDMAC_SDC_0 && chan->chan_id != IDMAC_SDC_1 &&
1362 ichan->dma_chan.chan_id != IDMAC_IC_7) 1446 chan->chan_id != IDMAC_IC_7)
1363 return NULL; 1447 return NULL;
1364 1448
1365 if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) { 1449 if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) {
@@ -1400,7 +1484,7 @@ static void idmac_issue_pending(struct dma_chan *chan)
1400 1484
1401 /* This is not always needed, but doesn't hurt either */ 1485 /* This is not always needed, but doesn't hurt either */
1402 spin_lock_irqsave(&ipu->lock, flags); 1486 spin_lock_irqsave(&ipu->lock, flags);
1403 ipu_select_buffer(ichan->dma_chan.chan_id, ichan->active_buffer); 1487 ipu_select_buffer(chan->chan_id, ichan->active_buffer);
1404 spin_unlock_irqrestore(&ipu->lock, flags); 1488 spin_unlock_irqrestore(&ipu->lock, flags);
1405 1489
1406 /* 1490 /*
@@ -1432,8 +1516,7 @@ static void __idmac_terminate_all(struct dma_chan *chan)
1432 struct idmac_tx_desc *desc = ichan->desc + i; 1516 struct idmac_tx_desc *desc = ichan->desc + i;
1433 if (list_empty(&desc->list)) 1517 if (list_empty(&desc->list))
1434 /* Descriptor was prepared, but not submitted */ 1518 /* Descriptor was prepared, but not submitted */
1435 list_add(&desc->list, 1519 list_add(&desc->list, &ichan->free_list);
1436 &ichan->free_list);
1437 1520
1438 async_tx_clear_ack(&desc->txd); 1521 async_tx_clear_ack(&desc->txd);
1439 } 1522 }
@@ -1458,6 +1541,28 @@ static void idmac_terminate_all(struct dma_chan *chan)
1458 mutex_unlock(&ichan->chan_mutex); 1541 mutex_unlock(&ichan->chan_mutex);
1459} 1542}
1460 1543
1544#ifdef DEBUG
1545static irqreturn_t ic_sof_irq(int irq, void *dev_id)
1546{
1547 struct idmac_channel *ichan = dev_id;
1548 printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n",
1549 irq, ichan->dma_chan.chan_id);
1550 disable_irq(irq);
1551 return IRQ_HANDLED;
1552}
1553
1554static irqreturn_t ic_eof_irq(int irq, void *dev_id)
1555{
1556 struct idmac_channel *ichan = dev_id;
1557 printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n",
1558 irq, ichan->dma_chan.chan_id);
1559 disable_irq(irq);
1560 return IRQ_HANDLED;
1561}
1562
1563static int ic_sof = -EINVAL, ic_eof = -EINVAL;
1564#endif
1565
1461static int idmac_alloc_chan_resources(struct dma_chan *chan) 1566static int idmac_alloc_chan_resources(struct dma_chan *chan)
1462{ 1567{
1463 struct idmac_channel *ichan = to_idmac_chan(chan); 1568 struct idmac_channel *ichan = to_idmac_chan(chan);
@@ -1471,31 +1576,49 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan)
1471 chan->cookie = 1; 1576 chan->cookie = 1;
1472 ichan->completed = -ENXIO; 1577 ichan->completed = -ENXIO;
1473 1578
1474 ret = ipu_irq_map(ichan->dma_chan.chan_id); 1579 ret = ipu_irq_map(chan->chan_id);
1475 if (ret < 0) 1580 if (ret < 0)
1476 goto eimap; 1581 goto eimap;
1477 1582
1478 ichan->eof_irq = ret; 1583 ichan->eof_irq = ret;
1584
1585 /*
1586 * Important to first disable the channel, because maybe someone
1587 * used it before us, e.g., the bootloader
1588 */
1589 ipu_disable_channel(idmac, ichan, true);
1590
1591 ret = ipu_init_channel(idmac, ichan);
1592 if (ret < 0)
1593 goto eichan;
1594
1479 ret = request_irq(ichan->eof_irq, idmac_interrupt, 0, 1595 ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
1480 ichan->eof_name, ichan); 1596 ichan->eof_name, ichan);
1481 if (ret < 0) 1597 if (ret < 0)
1482 goto erirq; 1598 goto erirq;
1483 1599
1484 ret = ipu_init_channel(idmac, ichan); 1600#ifdef DEBUG
1485 if (ret < 0) 1601 if (chan->chan_id == IDMAC_IC_7) {
1486 goto eichan; 1602 ic_sof = ipu_irq_map(69);
1603 if (ic_sof > 0)
1604 request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan);
1605 ic_eof = ipu_irq_map(70);
1606 if (ic_eof > 0)
1607 request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan);
1608 }
1609#endif
1487 1610
1488 ichan->status = IPU_CHANNEL_INITIALIZED; 1611 ichan->status = IPU_CHANNEL_INITIALIZED;
1489 1612
1490 dev_dbg(&ichan->dma_chan.dev->device, "Found channel 0x%x, irq %d\n", 1613 dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n",
1491 ichan->dma_chan.chan_id, ichan->eof_irq); 1614 chan->chan_id, ichan->eof_irq);
1492 1615
1493 return ret; 1616 return ret;
1494 1617
1495eichan:
1496 free_irq(ichan->eof_irq, ichan);
1497erirq: 1618erirq:
1498 ipu_irq_unmap(ichan->dma_chan.chan_id); 1619 ipu_uninit_channel(idmac, ichan);
1620eichan:
1621 ipu_irq_unmap(chan->chan_id);
1499eimap: 1622eimap:
1500 return ret; 1623 return ret;
1501} 1624}
@@ -1510,8 +1633,22 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
1510 __idmac_terminate_all(chan); 1633 __idmac_terminate_all(chan);
1511 1634
1512 if (ichan->status > IPU_CHANNEL_FREE) { 1635 if (ichan->status > IPU_CHANNEL_FREE) {
1636#ifdef DEBUG
1637 if (chan->chan_id == IDMAC_IC_7) {
1638 if (ic_sof > 0) {
1639 free_irq(ic_sof, ichan);
1640 ipu_irq_unmap(69);
1641 ic_sof = -EINVAL;
1642 }
1643 if (ic_eof > 0) {
1644 free_irq(ic_eof, ichan);
1645 ipu_irq_unmap(70);
1646 ic_eof = -EINVAL;
1647 }
1648 }
1649#endif
1513 free_irq(ichan->eof_irq, ichan); 1650 free_irq(ichan->eof_irq, ichan);
1514 ipu_irq_unmap(ichan->dma_chan.chan_id); 1651 ipu_irq_unmap(chan->chan_id);
1515 } 1652 }
1516 1653
1517 ichan->status = IPU_CHANNEL_FREE; 1654 ichan->status = IPU_CHANNEL_FREE;
@@ -1573,7 +1710,7 @@ static int __init ipu_idmac_init(struct ipu *ipu)
1573 dma_chan->device = &idmac->dma; 1710 dma_chan->device = &idmac->dma;
1574 dma_chan->cookie = 1; 1711 dma_chan->cookie = 1;
1575 dma_chan->chan_id = i; 1712 dma_chan->chan_id = i;
1576 list_add_tail(&ichan->dma_chan.device_node, &dma->channels); 1713 list_add_tail(&dma_chan->device_node, &dma->channels);
1577 } 1714 }
1578 1715
1579 idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF); 1716 idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF);
@@ -1581,7 +1718,7 @@ static int __init ipu_idmac_init(struct ipu *ipu)
1581 return dma_async_device_register(&idmac->dma); 1718 return dma_async_device_register(&idmac->dma);
1582} 1719}
1583 1720
1584static void ipu_idmac_exit(struct ipu *ipu) 1721static void __exit ipu_idmac_exit(struct ipu *ipu)
1585{ 1722{
1586 int i; 1723 int i;
1587 struct idmac *idmac = &ipu->idmac; 1724 struct idmac *idmac = &ipu->idmac;
@@ -1600,7 +1737,7 @@ static void ipu_idmac_exit(struct ipu *ipu)
1600 * IPU common probe / remove 1737 * IPU common probe / remove
1601 */ 1738 */
1602 1739
1603static int ipu_probe(struct platform_device *pdev) 1740static int __init ipu_probe(struct platform_device *pdev)
1604{ 1741{
1605 struct ipu_platform_data *pdata = pdev->dev.platform_data; 1742 struct ipu_platform_data *pdata = pdev->dev.platform_data;
1606 struct resource *mem_ipu, *mem_ic; 1743 struct resource *mem_ipu, *mem_ic;
@@ -1700,7 +1837,7 @@ err_noirq:
1700 return ret; 1837 return ret;
1701} 1838}
1702 1839
1703static int ipu_remove(struct platform_device *pdev) 1840static int __exit ipu_remove(struct platform_device *pdev)
1704{ 1841{
1705 struct ipu *ipu = platform_get_drvdata(pdev); 1842 struct ipu *ipu = platform_get_drvdata(pdev);
1706 1843
@@ -1725,7 +1862,7 @@ static struct platform_driver ipu_platform_driver = {
1725 .name = "ipu-core", 1862 .name = "ipu-core",
1726 .owner = THIS_MODULE, 1863 .owner = THIS_MODULE,
1727 }, 1864 },
1728 .remove = ipu_remove, 1865 .remove = __exit_p(ipu_remove),
1729}; 1866};
1730 1867
1731static int __init ipu_init(void) 1868static int __init ipu_init(void)
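
The ipu_idmac.c hunks above rework how descriptors travel through idmac_prep_slave_sg(), idmac_tx_submit() and the EOF interrupt handler. For orientation only, here is a minimal client-side sketch of the dmaengine slave flow that driver serves, written against the 2.6.29-era API visible in this diff (raw device_prep_slave_sg and txd->tx_submit calls); the example_* names, the single-entry scatterlist and the completion-based callback are assumptions for illustration, not part of the patch.

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical completion callback, run from the driver's IRQ path. */
static void example_dma_done(void *param)
{
	complete(param);	/* param is assumed to be a struct completion * */
}

/* Sketch: push one already-mapped buffer through a slave channel. */
static int example_slave_xfer(struct dma_chan *chan, dma_addr_t dma_buf,
			      size_t len, struct completion *done)
{
	struct scatterlist sg;
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = dma_buf;
	sg_dma_len(&sg) = len;

	txd = chan->device->device_prep_slave_sg(chan, &sg, 1,
						 DMA_FROM_DEVICE,
						 DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	txd->callback = example_dma_done;
	txd->callback_param = done;

	cookie = txd->tx_submit(txd);	/* idmac_tx_submit() in this driver */
	if (dma_submit_error(cookie))
		return cookie;

	chan->device->device_issue_pending(chan);
	return 0;
}

A negative cookie from tx_submit() means the driver rejected the descriptor; in the reworked idmac_tx_submit() above that is the new dequeue: path.
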
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index 83f532cc767f..dd8ebc75b667 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -352,7 +352,7 @@ static struct irq_chip ipu_irq_chip = {
352}; 352};
353 353
354/* Install the IRQ handler */ 354/* Install the IRQ handler */
355int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) 355int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
356{ 356{
357 struct ipu_platform_data *pdata = dev->dev.platform_data; 357 struct ipu_platform_data *pdata = dev->dev.platform_data;
358 unsigned int irq, irq_base, i; 358 unsigned int irq, irq_base, i;
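
Both IPU files tighten section annotations in this series: probe and IRQ-attach paths become __init, removal paths become __exit, and the platform_driver wraps its .remove with __exit_p(). As a reminder of that pattern (not code from this patch; the foo_* names are placeholders), a minimal platform driver using the same annotations might look like:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init foo_probe(struct platform_device *pdev)
{
	/* probe code lives in .init.text and is freed after init */
	return 0;
}

static int __exit foo_remove(struct platform_device *pdev)
{
	/* discarded entirely when the driver is built into the kernel */
	return 0;
}

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.owner = THIS_MODULE,
	},
	/* __exit_p() is NULL for built-in code, the function itself for modules */
	.remove = __exit_p(foo_remove),
};

static int __init foo_init(void)
{
	/* an __init probe must be registered via platform_driver_probe(), never via .probe */
	return platform_driver_probe(&foo_driver, foo_probe);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	platform_driver_unregister(&foo_driver);
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");
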
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index cb7f26fb9f18..ddab94f51224 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -632,7 +632,6 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
632 slot->async_tx.tx_submit = mv_xor_tx_submit; 632 slot->async_tx.tx_submit = mv_xor_tx_submit;
633 INIT_LIST_HEAD(&slot->chain_node); 633 INIT_LIST_HEAD(&slot->chain_node);
634 INIT_LIST_HEAD(&slot->slot_node); 634 INIT_LIST_HEAD(&slot->slot_node);
635 INIT_LIST_HEAD(&slot->async_tx.tx_list);
636 hw_desc = (char *) mv_chan->device->dma_desc_pool; 635 hw_desc = (char *) mv_chan->device->dma_desc_pool;
637 slot->async_tx.phys = 636 slot->async_tx.phys =
638 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; 637 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
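
The mv_xor.c hunk (like the one-line fsldma, ioat_dma and iop-adma changes in the diffstat) only drops a per-driver INIT_LIST_HEAD(&tx->tx_list): the common helper now owns that initialisation, which is also why the ipu driver above could drop both its INIT_LIST_HEAD() and its txd->chan assignment. A sketch of driver-side descriptor setup after this series, with foo_* as placeholder names:

#include <linux/dmaengine.h>
#include <linux/list.h>

/* Placeholder driver descriptor wrapping the generic one. */
struct foo_desc {
	struct dma_async_tx_descriptor txd;
	struct list_head node;
};

static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
{
	/* placeholder: a real driver assigns a new cookie and queues tx */
	return tx->chan->cookie;
}

static void foo_desc_init(struct foo_desc *desc, struct dma_chan *chan)
{
	/*
	 * dma_async_tx_descriptor_init() sets txd->chan and, after this
	 * series, also initialises txd->tx_list, so neither is done here.
	 */
	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = foo_tx_submit;
	INIT_LIST_HEAD(&desc->node);
}
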
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 45f6297821bd..5fc2ef8d97fa 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -21,6 +21,15 @@
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23 23
24/* on architectures without dma-mapping capabilities we need to ensure
25 * that the asynchronous path compiles away
26 */
27#ifdef CONFIG_HAS_DMA
28#define __async_inline
29#else
30#define __async_inline __always_inline
31#endif
32
24/** 33/**
25 * dma_chan_ref - object used to manage dma channels received from the 34 * dma_chan_ref - object used to manage dma channels received from the
26 * dmaengine core. 35 * dmaengine core.
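
The new __async_inline annotation implements the comment above it: on CONFIG_HAS_DMA=n builds it expands to __always_inline, so that once every caller can only pass a NULL channel the compiler constant-folds the DMA branch of an async_tx helper out of existence. A hedged illustration with a made-up helper (the real users are the async_tx library routines, not this function):

#include <linux/async_tx.h>
#include <linux/string.h>

/*
 * On HAS_DMA=n, __async_inline forces inlining, so when callers can only
 * pass chan == NULL the dma branch below is folded away at compile time.
 */
static __async_inline void
do_async_example(struct dma_chan *chan, void *buf, size_t len)
{
	if (chan) {
		/* hardware-offload path; unreachable without HAS_DMA */
		return;
	}

	memset(buf, 0, len);	/* synchronous software fallback */
}
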
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 1956c8d46d32..2e2aa3df170c 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -23,9 +23,6 @@
23 23
24#include <linux/device.h> 24#include <linux/device.h>
25#include <linux/uio.h> 25#include <linux/uio.h>
26#include <linux/kref.h>
27#include <linux/completion.h>
28#include <linux/rcupdate.h>
29#include <linux/dma-mapping.h> 26#include <linux/dma-mapping.h>
30 27
31/** 28/**
@@ -205,6 +202,7 @@ struct dma_async_tx_descriptor {
205/** 202/**
206 * struct dma_device - info on the entity supplying DMA services 203 * struct dma_device - info on the entity supplying DMA services
207 * @chancnt: how many DMA channels are supported 204 * @chancnt: how many DMA channels are supported
205 * @privatecnt: how many DMA channels are requested by dma_request_channel
208 * @channels: the list of struct dma_chan 206 * @channels: the list of struct dma_chan
209 * @global_node: list_head for global dma_device_list 207 * @global_node: list_head for global dma_device_list
210 * @cap_mask: one or more dma_capability flags 208 * @cap_mask: one or more dma_capability flags
@@ -227,6 +225,7 @@ struct dma_async_tx_descriptor {
227struct dma_device { 225struct dma_device {
228 226
229 unsigned int chancnt; 227 unsigned int chancnt;
228 unsigned int privatecnt;
230 struct list_head channels; 229 struct list_head channels;
231 struct list_head global_node; 230 struct list_head global_node;
232 dma_cap_mask_t cap_mask; 231 dma_cap_mask_t cap_mask;
@@ -291,6 +290,24 @@ static inline void net_dmaengine_put(void)
291} 290}
292#endif 291#endif
293 292
293#ifdef CONFIG_ASYNC_TX_DMA
294#define async_dmaengine_get() dmaengine_get()
295#define async_dmaengine_put() dmaengine_put()
296#define async_dma_find_channel(type) dma_find_channel(type)
297#else
298static inline void async_dmaengine_get(void)
299{
300}
301static inline void async_dmaengine_put(void)
302{
303}
304static inline struct dma_chan *
305async_dma_find_channel(enum dma_transaction_type type)
306{
307 return NULL;
308}
309#endif
310
294dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, 311dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
295 void *dest, void *src, size_t len); 312 void *dest, void *src, size_t len);
296dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, 313dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
@@ -337,6 +354,13 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
337 set_bit(tx_type, dstp->bits); 354 set_bit(tx_type, dstp->bits);
338} 355}
339 356
357#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
358static inline void
359__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
360{
361 clear_bit(tx_type, dstp->bits);
362}
363
340#define dma_cap_zero(mask) __dma_cap_zero(&(mask)) 364#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
341static inline void __dma_cap_zero(dma_cap_mask_t *dstp) 365static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
342{ 366{
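
dmaengine.h gains three things here: a privatecnt field (so the DMA_PRIVATE property set by dma_request_channel() can be reverted once the last privately requested channel is released), async_dmaengine_get/put and async_dma_find_channel() wrappers that let CONFIG_ASYNC_TX_DMA toggle DMA offload for async_tx, and a dma_cap_clear() helper to complement dma_cap_set(). A hedged client-side sketch of building and trimming a capability mask with the new helper (the filter-less request and the example_* name are assumptions about the caller, not this patch):

#include <linux/dmaengine.h>
#include <linux/types.h>

/* Sketch: build a capability mask and request a private channel. */
static struct dma_chan *example_get_channel(bool want_xor)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	dma_cap_set(DMA_XOR, mask);

	if (!want_xor)
		dma_cap_clear(DMA_XOR, mask);	/* helper added in this series */

	/*
	 * dma_request_channel() marks the providing device DMA_PRIVATE and
	 * bumps privatecnt, so the property can be dropped again once the
	 * last such channel goes back via dma_release_channel().
	 */
	return dma_request_channel(mask, NULL, NULL);
}
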
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index d797dde247f7..c8aad713a046 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -74,4 +74,23 @@ struct dw_dma_slave {
74#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ 74#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
75#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ 75#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */
76 76
77/* DMA API extensions */
78struct dw_cyclic_desc {
79 struct dw_desc **desc;
80 unsigned long periods;
81 void (*period_callback)(void *param);
82 void *period_callback_param;
83};
84
85struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
86 dma_addr_t buf_addr, size_t buf_len, size_t period_len,
87 enum dma_data_direction direction);
88void dw_dma_cyclic_free(struct dma_chan *chan);
89int dw_dma_cyclic_start(struct dma_chan *chan);
90void dw_dma_cyclic_stop(struct dma_chan *chan);
91
92dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
93
94dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
95
77#endif /* DW_DMAC_H */ 96#endif /* DW_DMAC_H */
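
The cyclic calls above are a driver-private extension of dw_dmac, as the "DMA API extensions" comment says (the generic dmaengine framework has no cyclic prep operation at this point): a client prepares one circular buffer split into periods, receives a callback per period, and starts and stops the ring explicitly. A hedged usage sketch against the declarations above; the DMA_FROM_DEVICE direction, the example_* names and the assumption that dw_dma_cyclic_prep() returns an ERR_PTR on failure are illustrative, not guaranteed by the header.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/dw_dmac.h>

static void example_period_done(void *param)
{
	/* Invoked once per period_len transferred; param is whatever
	 * was stored in period_callback_param. */
}

/* Sketch: run a receive ring of buf_len bytes in period_len chunks. */
static int example_start_ring(struct dma_chan *chan, dma_addr_t buf,
			      size_t buf_len, size_t period_len, void *ctx)
{
	struct dw_cyclic_desc *cdesc;

	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
				   DMA_FROM_DEVICE);
	if (IS_ERR(cdesc))			/* assumed error convention */
		return PTR_ERR(cdesc);

	cdesc->period_callback = example_period_done;
	cdesc->period_callback_param = ctx;

	return dw_dma_cyclic_start(chan);
}

static void example_stop_ring(struct dma_chan *chan)
{
	dw_dma_cyclic_stop(chan);
	dw_dma_cyclic_free(chan);
}

dw_dma_get_src_addr() and dw_dma_get_dst_addr() presumably let such a client work out the current position inside the ring, e.g. for residue or pointer reporting.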