aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma
diff options
context:
space:
mode:
authorAndy Shevchenko <andriy.shevchenko@linux.intel.com>2013-03-04 04:09:27 -0500
committerVinod Koul <vinod.koul@intel.com>2013-04-15 00:21:17 -0400
commite03e93a976d0f0da63f02fd3384c4b99cac8d715 (patch)
treed76e03e17a32a560eb7f78b52e8334e183bcff0b /drivers/dma
parent945b5af3cedcdfed6d2d940e53cd19933bb57386 (diff)
dmatest: create dmatest_info to keep test parameters
The proposed change will remove usage of the module parameters as global variables. In the future, this will help to run different test cases sequentially. The patch introduces the run_threaded_test() and stop_threaded_test() functions that could be used later outside of the dmatest_init, dmatest_exit scope. Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com> Acked-by: Viresh Kumar <viresh.kumar@linaro.org> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/dmatest.c160
1 file changed, 113 insertions, 47 deletions
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index e3955be2e3a4..7f9e3cc9361b 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -78,8 +78,11 @@ MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
78#define PATTERN_OVERWRITE 0x20 78#define PATTERN_OVERWRITE 0x20
79#define PATTERN_COUNT_MASK 0x1f 79#define PATTERN_COUNT_MASK 0x1f
80 80
81struct dmatest_info;
82
81struct dmatest_thread { 83struct dmatest_thread {
82 struct list_head node; 84 struct list_head node;
85 struct dmatest_info *info;
83 struct task_struct *task; 86 struct task_struct *task;
84 struct dma_chan *chan; 87 struct dma_chan *chan;
85 u8 **srcs; 88 u8 **srcs;
@@ -93,6 +96,32 @@ struct dmatest_chan {
93 struct list_head threads; 96 struct list_head threads;
94}; 97};
95 98
99/**
100 * struct dmatest_info - test information.
101 * @buf_size: size of the memcpy test buffer
102 * @channel: bus ID of the channel to test
103 * @device: bus ID of the DMA Engine to test
104 * @threads_per_chan: number of threads to start per channel
105 * @max_channels: maximum number of channels to use
106 * @iterations: iterations before stopping test
107 * @xor_sources: number of xor source buffers
108 * @pq_sources: number of p+q source buffers
109 * @timeout: transfer timeout in msec, -1 for infinite timeout
110 */
111struct dmatest_info {
112 unsigned int buf_size;
113 char channel[20];
114 char device[20];
115 unsigned int threads_per_chan;
116 unsigned int max_channels;
117 unsigned int iterations;
118 unsigned int xor_sources;
119 unsigned int pq_sources;
120 int timeout;
121};
122
123static struct dmatest_info test_info;
124
96/* 125/*
97 * These are protected by dma_list_mutex since they're only used by 126 * These are protected by dma_list_mutex since they're only used by
98 * the DMA filter function callback 127 * the DMA filter function callback
@@ -100,18 +129,20 @@ struct dmatest_chan {
100static LIST_HEAD(dmatest_channels); 129static LIST_HEAD(dmatest_channels);
101static unsigned int nr_channels; 130static unsigned int nr_channels;
102 131
103static bool dmatest_match_channel(struct dma_chan *chan) 132static bool dmatest_match_channel(struct dmatest_info *info,
133 struct dma_chan *chan)
104{ 134{
105 if (test_channel[0] == '\0') 135 if (info->channel[0] == '\0')
106 return true; 136 return true;
107 return strcmp(dma_chan_name(chan), test_channel) == 0; 137 return strcmp(dma_chan_name(chan), info->channel) == 0;
108} 138}
109 139
110static bool dmatest_match_device(struct dma_device *device) 140static bool dmatest_match_device(struct dmatest_info *info,
141 struct dma_device *device)
111{ 142{
112 if (test_device[0] == '\0') 143 if (info->device[0] == '\0')
113 return true; 144 return true;
114 return strcmp(dev_name(device->dev), test_device) == 0; 145 return strcmp(dev_name(device->dev), info->device) == 0;
115} 146}
116 147
117static unsigned long dmatest_random(void) 148static unsigned long dmatest_random(void)
@@ -122,7 +153,8 @@ static unsigned long dmatest_random(void)
122 return buf; 153 return buf;
123} 154}
124 155
125static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len) 156static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
157 unsigned int buf_size)
126{ 158{
127 unsigned int i; 159 unsigned int i;
128 u8 *buf; 160 u8 *buf;
@@ -133,13 +165,14 @@ static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
133 for ( ; i < start + len; i++) 165 for ( ; i < start + len; i++)
134 buf[i] = PATTERN_SRC | PATTERN_COPY 166 buf[i] = PATTERN_SRC | PATTERN_COPY
135 | (~i & PATTERN_COUNT_MASK); 167 | (~i & PATTERN_COUNT_MASK);
136 for ( ; i < test_buf_size; i++) 168 for ( ; i < buf_size; i++)
137 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); 169 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
138 buf++; 170 buf++;
139 } 171 }
140} 172}
141 173
142static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len) 174static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
175 unsigned int buf_size)
143{ 176{
144 unsigned int i; 177 unsigned int i;
145 u8 *buf; 178 u8 *buf;
@@ -150,7 +183,7 @@ static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
150 for ( ; i < start + len; i++) 183 for ( ; i < start + len; i++)
151 buf[i] = PATTERN_DST | PATTERN_OVERWRITE 184 buf[i] = PATTERN_DST | PATTERN_OVERWRITE
152 | (~i & PATTERN_COUNT_MASK); 185 | (~i & PATTERN_COUNT_MASK);
153 for ( ; i < test_buf_size; i++) 186 for ( ; i < buf_size; i++)
154 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); 187 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
155 } 188 }
156} 189}
@@ -268,6 +301,7 @@ static int dmatest_func(void *data)
268 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait); 301 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
269 struct dmatest_thread *thread = data; 302 struct dmatest_thread *thread = data;
270 struct dmatest_done done = { .wait = &done_wait }; 303 struct dmatest_done done = { .wait = &done_wait };
304 struct dmatest_info *info;
271 struct dma_chan *chan; 305 struct dma_chan *chan;
272 struct dma_device *dev; 306 struct dma_device *dev;
273 const char *thread_name; 307 const char *thread_name;
@@ -290,20 +324,21 @@ static int dmatest_func(void *data)
290 ret = -ENOMEM; 324 ret = -ENOMEM;
291 325
292 smp_rmb(); 326 smp_rmb();
327 info = thread->info;
293 chan = thread->chan; 328 chan = thread->chan;
294 dev = chan->device; 329 dev = chan->device;
295 if (thread->type == DMA_MEMCPY) 330 if (thread->type == DMA_MEMCPY)
296 src_cnt = dst_cnt = 1; 331 src_cnt = dst_cnt = 1;
297 else if (thread->type == DMA_XOR) { 332 else if (thread->type == DMA_XOR) {
298 /* force odd to ensure dst = src */ 333 /* force odd to ensure dst = src */
299 src_cnt = min_odd(xor_sources | 1, dev->max_xor); 334 src_cnt = min_odd(info->xor_sources | 1, dev->max_xor);
300 dst_cnt = 1; 335 dst_cnt = 1;
301 } else if (thread->type == DMA_PQ) { 336 } else if (thread->type == DMA_PQ) {
302 /* force odd to ensure dst = src */ 337 /* force odd to ensure dst = src */
303 src_cnt = min_odd(pq_sources | 1, dma_maxpq(dev, 0)); 338 src_cnt = min_odd(info->pq_sources | 1, dma_maxpq(dev, 0));
304 dst_cnt = 2; 339 dst_cnt = 2;
305 340
306 pq_coefs = kmalloc(pq_sources+1, GFP_KERNEL); 341 pq_coefs = kmalloc(info->pq_sources+1, GFP_KERNEL);
307 if (!pq_coefs) 342 if (!pq_coefs)
308 goto err_thread_type; 343 goto err_thread_type;
309 344
@@ -316,7 +351,7 @@ static int dmatest_func(void *data)
316 if (!thread->srcs) 351 if (!thread->srcs)
317 goto err_srcs; 352 goto err_srcs;
318 for (i = 0; i < src_cnt; i++) { 353 for (i = 0; i < src_cnt; i++) {
319 thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL); 354 thread->srcs[i] = kmalloc(info->buf_size, GFP_KERNEL);
320 if (!thread->srcs[i]) 355 if (!thread->srcs[i])
321 goto err_srcbuf; 356 goto err_srcbuf;
322 } 357 }
@@ -326,7 +361,7 @@ static int dmatest_func(void *data)
326 if (!thread->dsts) 361 if (!thread->dsts)
327 goto err_dsts; 362 goto err_dsts;
328 for (i = 0; i < dst_cnt; i++) { 363 for (i = 0; i < dst_cnt; i++) {
329 thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL); 364 thread->dsts[i] = kmalloc(info->buf_size, GFP_KERNEL);
330 if (!thread->dsts[i]) 365 if (!thread->dsts[i])
331 goto err_dstbuf; 366 goto err_dstbuf;
332 } 367 }
@@ -342,7 +377,7 @@ static int dmatest_func(void *data)
342 | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE; 377 | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
343 378
344 while (!kthread_should_stop() 379 while (!kthread_should_stop()
345 && !(iterations && total_tests >= iterations)) { 380 && !(info->iterations && total_tests >= info->iterations)) {
346 struct dma_async_tx_descriptor *tx = NULL; 381 struct dma_async_tx_descriptor *tx = NULL;
347 dma_addr_t dma_srcs[src_cnt]; 382 dma_addr_t dma_srcs[src_cnt];
348 dma_addr_t dma_dsts[dst_cnt]; 383 dma_addr_t dma_dsts[dst_cnt];
@@ -358,24 +393,24 @@ static int dmatest_func(void *data)
358 else if (thread->type == DMA_PQ) 393 else if (thread->type == DMA_PQ)
359 align = dev->pq_align; 394 align = dev->pq_align;
360 395
361 if (1 << align > test_buf_size) { 396 if (1 << align > info->buf_size) {
362 pr_err("%u-byte buffer too small for %d-byte alignment\n", 397 pr_err("%u-byte buffer too small for %d-byte alignment\n",
363 test_buf_size, 1 << align); 398 info->buf_size, 1 << align);
364 break; 399 break;
365 } 400 }
366 401
367 len = dmatest_random() % test_buf_size + 1; 402 len = dmatest_random() % info->buf_size + 1;
368 len = (len >> align) << align; 403 len = (len >> align) << align;
369 if (!len) 404 if (!len)
370 len = 1 << align; 405 len = 1 << align;
371 src_off = dmatest_random() % (test_buf_size - len + 1); 406 src_off = dmatest_random() % (info->buf_size - len + 1);
372 dst_off = dmatest_random() % (test_buf_size - len + 1); 407 dst_off = dmatest_random() % (info->buf_size - len + 1);
373 408
374 src_off = (src_off >> align) << align; 409 src_off = (src_off >> align) << align;
375 dst_off = (dst_off >> align) << align; 410 dst_off = (dst_off >> align) << align;
376 411
377 dmatest_init_srcs(thread->srcs, src_off, len); 412 dmatest_init_srcs(thread->srcs, src_off, len, info->buf_size);
378 dmatest_init_dsts(thread->dsts, dst_off, len); 413 dmatest_init_dsts(thread->dsts, dst_off, len, info->buf_size);
379 414
380 for (i = 0; i < src_cnt; i++) { 415 for (i = 0; i < src_cnt; i++) {
381 u8 *buf = thread->srcs[i] + src_off; 416 u8 *buf = thread->srcs[i] + src_off;
@@ -396,16 +431,16 @@ static int dmatest_func(void *data)
396 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ 431 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
397 for (i = 0; i < dst_cnt; i++) { 432 for (i = 0; i < dst_cnt; i++) {
398 dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], 433 dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
399 test_buf_size, 434 info->buf_size,
400 DMA_BIDIRECTIONAL); 435 DMA_BIDIRECTIONAL);
401 ret = dma_mapping_error(dev->dev, dma_dsts[i]); 436 ret = dma_mapping_error(dev->dev, dma_dsts[i]);
402 if (ret) { 437 if (ret) {
403 unmap_src(dev->dev, dma_srcs, len, src_cnt); 438 unmap_src(dev->dev, dma_srcs, len, src_cnt);
404 unmap_dst(dev->dev, dma_dsts, test_buf_size, i); 439 unmap_dst(dev->dev, dma_dsts, info->buf_size, i);
405 pr_warn("%s: #%u: mapping error %d with " 440 pr_warn("%s: #%u: mapping error %d with "
406 "dst_off=0x%x len=0x%x\n", 441 "dst_off=0x%x len=0x%x\n",
407 thread_name, total_tests - 1, ret, 442 thread_name, total_tests - 1, ret,
408 dst_off, test_buf_size); 443 dst_off, info->buf_size);
409 failed_tests++; 444 failed_tests++;
410 continue; 445 continue;
411 } 446 }
@@ -433,7 +468,7 @@ static int dmatest_func(void *data)
433 468
434 if (!tx) { 469 if (!tx) {
435 unmap_src(dev->dev, dma_srcs, len, src_cnt); 470 unmap_src(dev->dev, dma_srcs, len, src_cnt);
436 unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt); 471 unmap_dst(dev->dev, dma_dsts, info->buf_size, dst_cnt);
437 pr_warning("%s: #%u: prep error with src_off=0x%x " 472 pr_warning("%s: #%u: prep error with src_off=0x%x "
438 "dst_off=0x%x len=0x%x\n", 473 "dst_off=0x%x len=0x%x\n",
439 thread_name, total_tests - 1, 474 thread_name, total_tests - 1,
@@ -461,7 +496,7 @@ static int dmatest_func(void *data)
461 496
462 wait_event_freezable_timeout(done_wait, 497 wait_event_freezable_timeout(done_wait,
463 done.done || kthread_should_stop(), 498 done.done || kthread_should_stop(),
464 msecs_to_jiffies(timeout)); 499 msecs_to_jiffies(info->timeout));
465 500
466 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 501 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
467 502
@@ -488,7 +523,7 @@ static int dmatest_func(void *data)
488 } 523 }
489 524
490 /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ 525 /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
491 unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt); 526 unmap_dst(dev->dev, dma_dsts, info->buf_size, dst_cnt);
492 527
493 error_count = 0; 528 error_count = 0;
494 529
@@ -499,7 +534,7 @@ static int dmatest_func(void *data)
499 src_off + len, src_off, 534 src_off + len, src_off,
500 PATTERN_SRC | PATTERN_COPY, true); 535 PATTERN_SRC | PATTERN_COPY, true);
501 error_count += dmatest_verify(thread->srcs, src_off + len, 536 error_count += dmatest_verify(thread->srcs, src_off + len,
502 test_buf_size, src_off + len, 537 info->buf_size, src_off + len,
503 PATTERN_SRC, true); 538 PATTERN_SRC, true);
504 539
505 pr_debug("%s: verifying dest buffer...\n", 540 pr_debug("%s: verifying dest buffer...\n",
@@ -510,7 +545,7 @@ static int dmatest_func(void *data)
510 dst_off + len, src_off, 545 dst_off + len, src_off,
511 PATTERN_SRC | PATTERN_COPY, false); 546 PATTERN_SRC | PATTERN_COPY, false);
512 error_count += dmatest_verify(thread->dsts, dst_off + len, 547 error_count += dmatest_verify(thread->dsts, dst_off + len,
513 test_buf_size, dst_off + len, 548 info->buf_size, dst_off + len,
514 PATTERN_DST, false); 549 PATTERN_DST, false);
515 550
516 if (error_count) { 551 if (error_count) {
@@ -547,7 +582,7 @@ err_thread_type:
547 if (ret) 582 if (ret)
548 dmaengine_terminate_all(chan); 583 dmaengine_terminate_all(chan);
549 584
550 if (iterations > 0) 585 if (info->iterations > 0)
551 while (!kthread_should_stop()) { 586 while (!kthread_should_stop()) {
552 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); 587 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
553 interruptible_sleep_on(&wait_dmatest_exit); 588 interruptible_sleep_on(&wait_dmatest_exit);
@@ -576,7 +611,8 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
576 kfree(dtc); 611 kfree(dtc);
577} 612}
578 613
579static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type) 614static int dmatest_add_threads(struct dmatest_info *info,
615 struct dmatest_chan *dtc, enum dma_transaction_type type)
580{ 616{
581 struct dmatest_thread *thread; 617 struct dmatest_thread *thread;
582 struct dma_chan *chan = dtc->chan; 618 struct dma_chan *chan = dtc->chan;
@@ -592,7 +628,7 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty
592 else 628 else
593 return -EINVAL; 629 return -EINVAL;
594 630
595 for (i = 0; i < threads_per_chan; i++) { 631 for (i = 0; i < info->threads_per_chan; i++) {
596 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); 632 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
597 if (!thread) { 633 if (!thread) {
598 pr_warning("dmatest: No memory for %s-%s%u\n", 634 pr_warning("dmatest: No memory for %s-%s%u\n",
@@ -600,6 +636,7 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty
600 636
601 break; 637 break;
602 } 638 }
639 thread->info = info;
603 thread->chan = dtc->chan; 640 thread->chan = dtc->chan;
604 thread->type = type; 641 thread->type = type;
605 smp_wmb(); 642 smp_wmb();
@@ -620,7 +657,8 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty
620 return i; 657 return i;
621} 658}
622 659
623static int dmatest_add_channel(struct dma_chan *chan) 660static int dmatest_add_channel(struct dmatest_info *info,
661 struct dma_chan *chan)
624{ 662{
625 struct dmatest_chan *dtc; 663 struct dmatest_chan *dtc;
626 struct dma_device *dma_dev = chan->device; 664 struct dma_device *dma_dev = chan->device;
@@ -637,15 +675,15 @@ static int dmatest_add_channel(struct dma_chan *chan)
637 INIT_LIST_HEAD(&dtc->threads); 675 INIT_LIST_HEAD(&dtc->threads);
638 676
639 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { 677 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
640 cnt = dmatest_add_threads(dtc, DMA_MEMCPY); 678 cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
641 thread_count += cnt > 0 ? cnt : 0; 679 thread_count += cnt > 0 ? cnt : 0;
642 } 680 }
643 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { 681 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
644 cnt = dmatest_add_threads(dtc, DMA_XOR); 682 cnt = dmatest_add_threads(info, dtc, DMA_XOR);
645 thread_count += cnt > 0 ? cnt : 0; 683 thread_count += cnt > 0 ? cnt : 0;
646 } 684 }
647 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { 685 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
648 cnt = dmatest_add_threads(dtc, DMA_PQ); 686 cnt = dmatest_add_threads(info, dtc, DMA_PQ);
649 thread_count += cnt > 0 ? cnt : 0; 687 thread_count += cnt > 0 ? cnt : 0;
650 } 688 }
651 689
@@ -660,13 +698,16 @@ static int dmatest_add_channel(struct dma_chan *chan)
660 698
661static bool filter(struct dma_chan *chan, void *param) 699static bool filter(struct dma_chan *chan, void *param)
662{ 700{
663 if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device)) 701 struct dmatest_info *info = param;
702
703 if (!dmatest_match_channel(info, chan) ||
704 !dmatest_match_device(info, chan->device))
664 return false; 705 return false;
665 else 706 else
666 return true; 707 return true;
667} 708}
668 709
669static int __init dmatest_init(void) 710static int run_threaded_test(struct dmatest_info *info)
670{ 711{
671 dma_cap_mask_t mask; 712 dma_cap_mask_t mask;
672 struct dma_chan *chan; 713 struct dma_chan *chan;
@@ -675,25 +716,22 @@ static int __init dmatest_init(void)
675 dma_cap_zero(mask); 716 dma_cap_zero(mask);
676 dma_cap_set(DMA_MEMCPY, mask); 717 dma_cap_set(DMA_MEMCPY, mask);
677 for (;;) { 718 for (;;) {
678 chan = dma_request_channel(mask, filter, NULL); 719 chan = dma_request_channel(mask, filter, info);
679 if (chan) { 720 if (chan) {
680 err = dmatest_add_channel(chan); 721 err = dmatest_add_channel(info, chan);
681 if (err) { 722 if (err) {
682 dma_release_channel(chan); 723 dma_release_channel(chan);
683 break; /* add_channel failed, punt */ 724 break; /* add_channel failed, punt */
684 } 725 }
685 } else 726 } else
686 break; /* no more channels available */ 727 break; /* no more channels available */
687 if (max_channels && nr_channels >= max_channels) 728 if (info->max_channels && nr_channels >= info->max_channels)
688 break; /* we have all we need */ 729 break; /* we have all we need */
689 } 730 }
690
691 return err; 731 return err;
692} 732}
693/* when compiled-in wait for drivers to load first */
694late_initcall(dmatest_init);
695 733
696static void __exit dmatest_exit(void) 734static void stop_threaded_test(struct dmatest_info *info)
697{ 735{
698 struct dmatest_chan *dtc, *_dtc; 736 struct dmatest_chan *dtc, *_dtc;
699 struct dma_chan *chan; 737 struct dma_chan *chan;
@@ -707,6 +745,34 @@ static void __exit dmatest_exit(void)
707 dma_release_channel(chan); 745 dma_release_channel(chan);
708 } 746 }
709} 747}
748
749static int __init dmatest_init(void)
750{
751 struct dmatest_info *info = &test_info;
752
753 memset(info, 0, sizeof(*info));
754
755 info->buf_size = test_buf_size;
756 strlcpy(info->channel, test_channel, sizeof(info->channel));
757 strlcpy(info->device, test_device, sizeof(info->device));
758 info->threads_per_chan = threads_per_chan;
759 info->max_channels = max_channels;
760 info->iterations = iterations;
761 info->xor_sources = xor_sources;
762 info->pq_sources = pq_sources;
763 info->timeout = timeout;
764
765 return run_threaded_test(info);
766}
767/* when compiled-in wait for drivers to load first */
768late_initcall(dmatest_init);
769
770static void __exit dmatest_exit(void)
771{
772 struct dmatest_info *info = &test_info;
773
774 stop_threaded_test(info);
775}
710module_exit(dmatest_exit); 776module_exit(dmatest_exit);
711 777
712MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 778MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");