Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/Kconfig     |   1
-rw-r--r--  drivers/dma/dmaengine.c |  49
-rw-r--r--  drivers/dma/ioat_dma.c  |  39
-rw-r--r--  drivers/dma/iop-adma.c  | 124
4 files changed, 87 insertions(+), 126 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index c46b7c219ee9..a703deffb795 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -5,6 +5,7 @@
 menuconfig DMADEVICES
 	bool "DMA Engine support"
 	depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
+	depends on !HIGHMEM64G
 	help
 	  DMA engines can do asynchronous data transfers without
 	  involving the host CPU. Currently, this framework can be
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index bcf52df30339..29965231b912 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -473,20 +473,22 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
 
-	tx = dev->device_prep_dma_memcpy(chan, len, 0);
-	if (!tx)
+	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+	if (!tx) {
+		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
+	}
 
 	tx->ack = 1;
 	tx->callback = NULL;
-	addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	cookie = tx->tx_submit(tx);
 
 	cpu = get_cpu();
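
[Review note] A minimal sketch of the calling convention this hunk establishes: the client maps its buffers first, hands the bus addresses to the prep routine in one call, and unwinds its own mappings if prep fails. The copy_buf() wrapper below is hypothetical, not part of the patch; it only restates the new flow in one piece.

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical client wrapper illustrating the reworked flow. */
	static dma_cookie_t copy_buf(struct dma_chan *chan, void *dest,
				     void *src, size_t len)
	{
		struct dma_device *dev = chan->device;
		struct dma_async_tx_descriptor *tx;
		dma_addr_t dma_src, dma_dest;

		/* the caller, not the core helper, now owns the mappings */
		dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
		dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);

		tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
		if (!tx) {
			/* prep failed: unwind our own mappings */
			dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
			dma_unmap_single(dev->dev, dma_dest, len,
					 DMA_FROM_DEVICE);
			return -ENOMEM;
		}

		tx->ack = 1;
		tx->callback = NULL;
		return tx->tx_submit(tx);
	}
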
@@ -517,20 +519,22 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
 
-	tx = dev->device_prep_dma_memcpy(chan, len, 0);
-	if (!tx)
+	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+	if (!tx) {
+		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
+	}
 
 	tx->ack = 1;
 	tx->callback = NULL;
-	addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	cookie = tx->tx_submit(tx);
 
 	cpu = get_cpu();
@@ -563,20 +567,23 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
 
-	tx = dev->device_prep_dma_memcpy(chan, len, 0);
-	if (!tx)
+	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
+				DMA_FROM_DEVICE);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+	if (!tx) {
+		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
+	}
 
 	tx->ack = 1;
 	tx->callback = NULL;
-	addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	cookie = tx->tx_submit(tx);
 
 	cpu = get_cpu();
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 45e7b4666c7b..5bcfc55a2776 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -159,20 +159,6 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 	return device->common.chancnt;
 }
 
-static void ioat_set_src(dma_addr_t addr,
-			 struct dma_async_tx_descriptor *tx,
-			 int index)
-{
-	tx_to_ioat_desc(tx)->src = addr;
-}
-
-static void ioat_set_dest(dma_addr_t addr,
-			  struct dma_async_tx_descriptor *tx,
-			  int index)
-{
-	tx_to_ioat_desc(tx)->dst = addr;
-}
-
 /**
  * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
  *                                 descriptors to hw
@@ -415,8 +401,6 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
 
 	memset(desc, 0, sizeof(*desc));
 	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
-	desc_sw->async_tx.tx_set_src = ioat_set_src;
-	desc_sw->async_tx.tx_set_dest = ioat_set_dest;
 	switch (ioat_chan->device->version) {
 	case IOAT_VER_1_2:
 		desc_sw->async_tx.tx_submit = ioat1_tx_submit;
@@ -714,6 +698,8 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
 
 static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 						struct dma_chan *chan,
+						dma_addr_t dma_dest,
+						dma_addr_t dma_src,
 						size_t len,
 						int int_en)
 {
@@ -726,6 +712,8 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 
 	if (new) {
 		new->len = len;
+		new->dst = dma_dest;
+		new->src = dma_src;
 		return &new->async_tx;
 	} else
 		return NULL;
@@ -733,6 +721,8 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 
 static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 						struct dma_chan *chan,
+						dma_addr_t dma_dest,
+						dma_addr_t dma_src,
 						size_t len,
 						int int_en)
 {
@@ -749,6 +739,8 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 
 	if (new) {
 		new->len = len;
+		new->dst = dma_dest;
+		new->src = dma_src;
 		return &new->async_tx;
 	} else
 		return NULL;
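
[Review note] On the driver side the change is symmetrical: the prep routine now receives the already-mapped addresses and simply records them in the software descriptor, so the per-descriptor tx_set_src/tx_set_dest hooks removed above have nothing left to do. A sketch of the resulting shape; ioat_alloc_desc() is a hypothetical stand-in for the driver's real descriptor-ring handling, which this patch leaves unchanged.

	static struct dma_async_tx_descriptor *
	ioat_prep_memcpy_sketch(struct dma_chan *chan, dma_addr_t dma_dest,
				dma_addr_t dma_src, size_t len, int int_en)
	{
		/* hypothetical helper standing in for descriptor allocation */
		struct ioat_desc_sw *new = ioat_alloc_desc(chan, len);

		if (!new)
			return NULL;

		new->len = len;
		new->dst = dma_dest;	/* was: tx->tx_set_dest(addr, tx, 0) */
		new->src = dma_src;	/* was: tx->tx_set_src(addr, tx, 0) */
		return &new->async_tx;
	}
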
@@ -1045,7 +1037,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	u8 *dest;
 	struct dma_chan *dma_chan;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int err = 0;
 
@@ -1073,7 +1065,12 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 		goto out;
 	}
 
-	tx = device->common.device_prep_dma_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
+	dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
+				 DMA_TO_DEVICE);
+	dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
+				  DMA_FROM_DEVICE);
+	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
+						   IOAT_TEST_SIZE, 0);
 	if (!tx) {
 		dev_err(&device->pdev->dev,
 			"Self-test prep failed, disabling\n");
@@ -1082,12 +1079,6 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	}
 
 	async_tx_ack(tx);
-	addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
-			      DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
-			      DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	tx->callback = ioat_dma_test_callback;
 	tx->callback_param = (void *)0x8086;
 	cookie = tx->tx_submit(tx);
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index b011b5ae22a2..eda841c60690 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -443,17 +443,6 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
 	return cookie;
 }
 
-static void
-iop_adma_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
-		  int index)
-{
-	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-	struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
-
-	/* to do: support transfers lengths > IOP_ADMA_MAX_BYTE_COUNT */
-	iop_desc_set_dest_addr(sw_desc->group_head, iop_chan, addr);
-}
-
 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
 
@@ -486,7 +475,6 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 
 	dma_async_tx_descriptor_init(&slot->async_tx, chan);
 	slot->async_tx.tx_submit = iop_adma_tx_submit;
-	slot->async_tx.tx_set_dest = iop_adma_set_dest;
 	INIT_LIST_HEAD(&slot->chain_node);
 	INIT_LIST_HEAD(&slot->slot_node);
 	INIT_LIST_HEAD(&slot->async_tx.tx_list);
@@ -547,18 +535,9 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan)
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
-static void
-iop_adma_memcpy_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
-			int index)
-{
-	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-	struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
-	iop_desc_set_memcpy_src_addr(grp_start, addr);
-}
-
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
+iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+			 dma_addr_t dma_src, size_t len, int int_en)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -578,9 +557,10 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
 		grp_start = sw_desc->group_head;
 		iop_desc_init_memcpy(grp_start, int_en);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
+		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
+		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
 		sw_desc->unmap_src_cnt = 1;
 		sw_desc->unmap_len = len;
-		sw_desc->async_tx.tx_set_src = iop_adma_memcpy_set_src;
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
@@ -588,8 +568,8 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
 }
 
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
-			 int int_en)
+iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
+			 int value, size_t len, int int_en)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -610,6 +590,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
 		iop_desc_init_memset(grp_start, int_en);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
 		iop_desc_set_block_fill_val(grp_start, value);
+		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		sw_desc->unmap_src_cnt = 1;
 		sw_desc->unmap_len = len;
 	}
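
[Review note] The memset prep follows the same pattern: the mapped destination travels with the prep call itself. A hedged caller-side sketch; fill_page() is hypothetical and assumes the device advertises the memset capability.

	/* Hypothetical caller: fill one page with zeroes via the engine. */
	static struct dma_async_tx_descriptor *
	fill_page(struct dma_chan *chan, struct page *page)
	{
		struct dma_device *dev = chan->device;
		dma_addr_t dma_dest;

		dma_dest = dma_map_page(dev->dev, page, 0, PAGE_SIZE,
					DMA_FROM_DEVICE);
		/* destination address is now a prep argument */
		return dev->device_prep_dma_memset(chan, dma_dest, 0,
						   PAGE_SIZE, 0);
	}
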
@@ -618,19 +599,10 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
-static void
-iop_adma_xor_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
-		     int index)
-{
-	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-	struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
-	iop_desc_set_xor_src_addr(grp_start, index, addr);
-}
-
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,
-		      int int_en)
+iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
+		      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
+		      int int_en)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -651,29 +623,22 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,
 		grp_start = sw_desc->group_head;
 		iop_desc_init_xor(grp_start, src_cnt, int_en);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
+		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
-		sw_desc->async_tx.tx_set_src = iop_adma_xor_set_src;
+		while (src_cnt--)
+			iop_desc_set_xor_src_addr(grp_start, src_cnt,
+						  dma_src[src_cnt]);
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
-static void
-iop_adma_xor_zero_sum_set_src(dma_addr_t addr,
-			      struct dma_async_tx_descriptor *tx,
-			      int index)
-{
-	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-	struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
-	iop_desc_set_zero_sum_src_addr(grp_start, index, addr);
-}
-
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,
-			   size_t len, u32 *result, int int_en)
+iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
+			   unsigned int src_cnt, size_t len, u32 *result,
+			   int int_en)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -697,7 +662,9 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,
 			__FUNCTION__, grp_start->xor_check_result);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
-		sw_desc->async_tx.tx_set_src = iop_adma_xor_zero_sum_set_src;
+		while (src_cnt--)
+			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
+						       dma_src[src_cnt]);
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
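
[Review note] For the multi-source operations the convention extends naturally: the caller maps every source and passes the whole dma_addr_t array in one prep call, which is what replaces the index-based tx_set_src callbacks deleted above. A hypothetical xor caller, sketched under the assumption that src_cnt fits the small on-stack array:

	/* Hypothetical caller: xor src_cnt pages into dest. */
	static struct dma_async_tx_descriptor *
	xor_pages(struct dma_chan *chan, struct page *dest,
		  struct page **srcs, unsigned int src_cnt)
	{
		struct dma_device *dev = chan->device;
		dma_addr_t dma_dest, dma_src[8];	/* assumes src_cnt <= 8 */
		unsigned int i;

		dma_dest = dma_map_page(dev->dev, dest, 0, PAGE_SIZE,
					DMA_FROM_DEVICE);
		for (i = 0; i < src_cnt; i++)
			dma_src[i] = dma_map_page(dev->dev, srcs[i], 0,
						  PAGE_SIZE, DMA_TO_DEVICE);

		/* every address travels in the one prep call */
		return dev->device_prep_dma_xor(chan, dma_dest, dma_src,
						src_cnt, PAGE_SIZE, 0);
	}
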
@@ -882,13 +849,12 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
 		goto out;
 	}
 
-	tx = iop_adma_prep_dma_memcpy(dma_chan, IOP_ADMA_TEST_SIZE, 1);
 	dest_dma = dma_map_single(dma_chan->device->dev, dest,
 				  IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
-	iop_adma_set_dest(dest_dma, tx, 0);
 	src_dma = dma_map_single(dma_chan->device->dev, src,
 				 IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
-	iop_adma_memcpy_set_src(src_dma, tx, 0);
+	tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
+				      IOP_ADMA_TEST_SIZE, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -929,6 +895,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	struct page *dest;
 	struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
 	struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
+	dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
 	dma_addr_t dma_addr, dest_dma;
 	struct dma_async_tx_descriptor *tx;
 	struct dma_chan *dma_chan;
@@ -981,17 +948,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	}
 
 	/* test xor */
-	tx = iop_adma_prep_dma_xor(dma_chan, IOP_ADMA_NUM_SRC_TEST,
-				   PAGE_SIZE, 1);
 	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
 				PAGE_SIZE, DMA_FROM_DEVICE);
-	iop_adma_set_dest(dest_dma, tx, 0);
-
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
-		dma_addr = dma_map_page(dma_chan->device->dev, xor_srcs[i], 0,
-					PAGE_SIZE, DMA_TO_DEVICE);
-		iop_adma_xor_set_src(dma_addr, tx, i);
-	}
+	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
+		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+					   0, PAGE_SIZE, DMA_TO_DEVICE);
+	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
+				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -1032,13 +995,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 
 	zero_sum_result = 1;
 
-	tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
-					PAGE_SIZE, &zero_sum_result, 1);
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
-		dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
-					0, PAGE_SIZE, DMA_TO_DEVICE);
-		iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
-	}
+	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
+					   zero_sum_srcs[i], 0, PAGE_SIZE,
+					   DMA_TO_DEVICE);
+	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
+					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+					&zero_sum_result, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -1060,10 +1023,9 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	}
 
 	/* test memset */
-	tx = iop_adma_prep_dma_memset(dma_chan, 0, PAGE_SIZE, 1);
 	dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
 				PAGE_SIZE, DMA_FROM_DEVICE);
-	iop_adma_set_dest(dma_addr, tx, 0);
+	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -1089,13 +1051,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 
 	/* test for non-zero parity sum */
 	zero_sum_result = 0;
-	tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
-					PAGE_SIZE, &zero_sum_result, 1);
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
-		dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
-					0, PAGE_SIZE, DMA_TO_DEVICE);
-		iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
-	}
+	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
+					   zero_sum_srcs[i], 0, PAGE_SIZE,
+					   DMA_TO_DEVICE);
+	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
+					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+					&zero_sum_result, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);