about summary refs log tree commit diff stats
path: root/drivers/dma
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2013-11-06 19:30:10 -0500
committerDan Williams <dan.j.williams@intel.com>2013-11-14 14:04:40 -0500
commit4076e755dbec078c85352a8f77cec4c10181da4e (patch)
treee58bfe43d9074cb177516e80039c19765eeb4fa0 /drivers/dma
parent2d88ce76eb98c4ac4411dcb299cf61ca8999d2b9 (diff)
dmatest: convert to dmaengine_unmap_data
Remove the open coded unmap and add coverage for this core functionality to dmatest. Also fixes up a couple places where we leaked dma mappings. Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r-- drivers/dma/dmatest.c | 86
1 file changed, 44 insertions(+), 42 deletions(-)
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index dd4d84d556d5..0d050d2324e3 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -326,20 +326,6 @@ static void dmatest_callback(void *arg)
326 wake_up_all(done->wait); 326 wake_up_all(done->wait);
327} 327}
328 328
329static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
330 unsigned int count)
331{
332 while (count--)
333 dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
334}
335
336static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
337 unsigned int count)
338{
339 while (count--)
340 dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
341}
342
343static unsigned int min_odd(unsigned int x, unsigned int y) 329static unsigned int min_odd(unsigned int x, unsigned int y)
344{ 330{
345 unsigned int val = min(x, y); 331 unsigned int val = min(x, y);
@@ -484,8 +470,9 @@ static int dmatest_func(void *data)
484 while (!kthread_should_stop() 470 while (!kthread_should_stop()
485 && !(params->iterations && total_tests >= params->iterations)) { 471 && !(params->iterations && total_tests >= params->iterations)) {
486 struct dma_async_tx_descriptor *tx = NULL; 472 struct dma_async_tx_descriptor *tx = NULL;
487 dma_addr_t dma_srcs[src_cnt]; 473 struct dmaengine_unmap_data *um;
488 dma_addr_t dma_dsts[dst_cnt]; 474 dma_addr_t srcs[src_cnt];
475 dma_addr_t *dsts;
489 u8 align = 0; 476 u8 align = 0;
490 477
491 total_tests++; 478 total_tests++;
@@ -530,61 +517,75 @@ static int dmatest_func(void *data)
530 len = 1 << align; 517 len = 1 << align;
531 total_len += len; 518 total_len += len;
532 519
533 for (i = 0; i < src_cnt; i++) { 520 um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
534 u8 *buf = thread->srcs[i] + src_off; 521 GFP_KERNEL);
522 if (!um) {
523 failed_tests++;
524 result("unmap data NULL", total_tests,
525 src_off, dst_off, len, ret);
526 continue;
527 }
535 528
536 dma_srcs[i] = dma_map_single(dev->dev, buf, len, 529 um->len = params->buf_size;
537 DMA_TO_DEVICE); 530 for (i = 0; i < src_cnt; i++) {
538 ret = dma_mapping_error(dev->dev, dma_srcs[i]); 531 unsigned long buf = (unsigned long) thread->srcs[i];
532 struct page *pg = virt_to_page(buf);
533 unsigned pg_off = buf & ~PAGE_MASK;
534
535 um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
536 um->len, DMA_TO_DEVICE);
537 srcs[i] = um->addr[i] + src_off;
538 ret = dma_mapping_error(dev->dev, um->addr[i]);
539 if (ret) { 539 if (ret) {
540 unmap_src(dev->dev, dma_srcs, len, i); 540 dmaengine_unmap_put(um);
541 result("src mapping error", total_tests, 541 result("src mapping error", total_tests,
542 src_off, dst_off, len, ret); 542 src_off, dst_off, len, ret);
543 failed_tests++; 543 failed_tests++;
544 continue; 544 continue;
545 } 545 }
546 um->to_cnt++;
546 } 547 }
547 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ 548 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
549 dsts = &um->addr[src_cnt];
548 for (i = 0; i < dst_cnt; i++) { 550 for (i = 0; i < dst_cnt; i++) {
549 dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], 551 unsigned long buf = (unsigned long) thread->dsts[i];
550 params->buf_size, 552 struct page *pg = virt_to_page(buf);
551 DMA_BIDIRECTIONAL); 553 unsigned pg_off = buf & ~PAGE_MASK;
552 ret = dma_mapping_error(dev->dev, dma_dsts[i]); 554
555 dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
556 DMA_BIDIRECTIONAL);
557 ret = dma_mapping_error(dev->dev, dsts[i]);
553 if (ret) { 558 if (ret) {
554 unmap_src(dev->dev, dma_srcs, len, src_cnt); 559 dmaengine_unmap_put(um);
555 unmap_dst(dev->dev, dma_dsts, params->buf_size,
556 i);
557 result("dst mapping error", total_tests, 560 result("dst mapping error", total_tests,
558 src_off, dst_off, len, ret); 561 src_off, dst_off, len, ret);
559 failed_tests++; 562 failed_tests++;
560 continue; 563 continue;
561 } 564 }
565 um->bidi_cnt++;
562 } 566 }
563 567
564 if (thread->type == DMA_MEMCPY) 568 if (thread->type == DMA_MEMCPY)
565 tx = dev->device_prep_dma_memcpy(chan, 569 tx = dev->device_prep_dma_memcpy(chan,
566 dma_dsts[0] + dst_off, 570 dsts[0] + dst_off,
567 dma_srcs[0], len, 571 srcs[0], len, flags);
568 flags);
569 else if (thread->type == DMA_XOR) 572 else if (thread->type == DMA_XOR)
570 tx = dev->device_prep_dma_xor(chan, 573 tx = dev->device_prep_dma_xor(chan,
571 dma_dsts[0] + dst_off, 574 dsts[0] + dst_off,
572 dma_srcs, src_cnt, 575 srcs, src_cnt,
573 len, flags); 576 len, flags);
574 else if (thread->type == DMA_PQ) { 577 else if (thread->type == DMA_PQ) {
575 dma_addr_t dma_pq[dst_cnt]; 578 dma_addr_t dma_pq[dst_cnt];
576 579
577 for (i = 0; i < dst_cnt; i++) 580 for (i = 0; i < dst_cnt; i++)
578 dma_pq[i] = dma_dsts[i] + dst_off; 581 dma_pq[i] = dsts[i] + dst_off;
579 tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, 582 tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
580 src_cnt, pq_coefs, 583 src_cnt, pq_coefs,
581 len, flags); 584 len, flags);
582 } 585 }
583 586
584 if (!tx) { 587 if (!tx) {
585 unmap_src(dev->dev, dma_srcs, len, src_cnt); 588 dmaengine_unmap_put(um);
586 unmap_dst(dev->dev, dma_dsts, params->buf_size,
587 dst_cnt);
588 result("prep error", total_tests, src_off, 589 result("prep error", total_tests, src_off,
589 dst_off, len, ret); 590 dst_off, len, ret);
590 msleep(100); 591 msleep(100);
@@ -598,6 +599,7 @@ static int dmatest_func(void *data)
598 cookie = tx->tx_submit(tx); 599 cookie = tx->tx_submit(tx);
599 600
600 if (dma_submit_error(cookie)) { 601 if (dma_submit_error(cookie)) {
602 dmaengine_unmap_put(um);
601 result("submit error", total_tests, src_off, 603 result("submit error", total_tests, src_off,
602 dst_off, len, ret); 604 dst_off, len, ret);
603 msleep(100); 605 msleep(100);
@@ -620,11 +622,13 @@ static int dmatest_func(void *data)
620 * free it this time?" dancing. For now, just 622 * free it this time?" dancing. For now, just
621 * leave it dangling. 623 * leave it dangling.
622 */ 624 */
625 dmaengine_unmap_put(um);
623 result("test timed out", total_tests, src_off, dst_off, 626 result("test timed out", total_tests, src_off, dst_off,
624 len, 0); 627 len, 0);
625 failed_tests++; 628 failed_tests++;
626 continue; 629 continue;
627 } else if (status != DMA_SUCCESS) { 630 } else if (status != DMA_SUCCESS) {
631 dmaengine_unmap_put(um);
628 result(status == DMA_ERROR ? 632 result(status == DMA_ERROR ?
629 "completion error status" : 633 "completion error status" :
630 "completion busy status", total_tests, src_off, 634 "completion busy status", total_tests, src_off,
@@ -633,9 +637,7 @@ static int dmatest_func(void *data)
633 continue; 637 continue;
634 } 638 }
635 639
636 /* Unmap by myself */ 640 dmaengine_unmap_put(um);
637 unmap_src(dev->dev, dma_srcs, len, src_cnt);
638 unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);
639 641
640 if (params->noverify) { 642 if (params->noverify) {
641 dbg_result("test passed", total_tests, src_off, dst_off, 643 dbg_result("test passed", total_tests, src_off, dst_off,