author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-06 14:16:11 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-06 14:16:11 -0500
commit    3d4d4582e5b3f67a68f2cf32fd5b70d8d80f119d (patch)
tree      18d270847537d1a9d1a396d03e585654130630db /drivers
parent    8f1bfa4c5c093e97154be4ec969bdf7190aeff6a (diff)
parent    47437b2c9a64315efeb3d84e97ffefd6c3c67ef1 (diff)
Merge branch 'async-tx-for-linus' of git://lost.foo-projects.org/~dwillia2/git/iop into fix
* 'async-tx-for-linus' of git://lost.foo-projects.org/~dwillia2/git/iop:
  async_tx: allow architecture specific async_tx_find_channel implementations
  async_tx: replace 'int_en' with operation preparation flags
  async_tx: kill tx_set_src and tx_set_dest methods
  async_tx: kill ASYNC_TX_ASSUME_COHERENT
  iop-adma: use LIST_HEAD instead of LIST_HEAD_INIT
  async_tx: use LIST_HEAD instead of LIST_HEAD_INIT
  async_tx: fix compile breakage, mark do_async_xor __always_inline
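In short, the prep routines now take pre-mapped DMA addresses plus an operation flags word, and the per-descriptor tx_set_src()/tx_set_dest() hooks are removed. A minimal sketch of the new calling convention, condensed from the dmaengine.c hunks below (error paths trimmed, names taken from the diff):

	dma_addr_t dma_src, dma_dest;
	struct dma_async_tx_descriptor *tx;

	/* the caller maps the buffers itself ... */
	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);

	/* ... and passes bus addresses and flags straight to the prep hook */
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
	if (!tx) {
		/* on failure the caller also owns the unmap */
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}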
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/Kconfig        1
-rw-r--r--  drivers/dma/dmaengine.c   49
-rw-r--r--  drivers/dma/ioat_dma.c    43
-rw-r--r--  drivers/dma/iop-adma.c   138
4 files changed, 96 insertions, 135 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index c46b7c219ee9..a703deffb795 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -5,6 +5,7 @@
 menuconfig DMADEVICES
         bool "DMA Engine support"
         depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
+        depends on !HIGHMEM64G
         help
           DMA engines can do asynchronous data transfers without
           involving the host CPU. Currently, this framework can be
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index bcf52df30339..29965231b912 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -473,20 +473,22 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 {
         struct dma_device *dev = chan->device;
         struct dma_async_tx_descriptor *tx;
-        dma_addr_t addr;
+        dma_addr_t dma_dest, dma_src;
         dma_cookie_t cookie;
         int cpu;
 
-        tx = dev->device_prep_dma_memcpy(chan, len, 0);
-        if (!tx)
+        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
+        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
+        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+        if (!tx) {
+                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+                dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                 return -ENOMEM;
+        }
 
         tx->ack = 1;
         tx->callback = NULL;
-        addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
-        tx->tx_set_src(addr, tx, 0);
-        addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-        tx->tx_set_dest(addr, tx, 0);
         cookie = tx->tx_submit(tx);
 
         cpu = get_cpu();
@@ -517,20 +519,22 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 {
         struct dma_device *dev = chan->device;
         struct dma_async_tx_descriptor *tx;
-        dma_addr_t addr;
+        dma_addr_t dma_dest, dma_src;
         dma_cookie_t cookie;
         int cpu;
 
-        tx = dev->device_prep_dma_memcpy(chan, len, 0);
-        if (!tx)
+        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
+        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
+        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+        if (!tx) {
+                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                 return -ENOMEM;
+        }
 
         tx->ack = 1;
         tx->callback = NULL;
-        addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
-        tx->tx_set_src(addr, tx, 0);
-        addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-        tx->tx_set_dest(addr, tx, 0);
         cookie = tx->tx_submit(tx);
 
         cpu = get_cpu();
@@ -563,20 +567,23 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 {
         struct dma_device *dev = chan->device;
         struct dma_async_tx_descriptor *tx;
-        dma_addr_t addr;
+        dma_addr_t dma_dest, dma_src;
         dma_cookie_t cookie;
         int cpu;
 
-        tx = dev->device_prep_dma_memcpy(chan, len, 0);
-        if (!tx)
+        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
+        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
+                                DMA_FROM_DEVICE);
+        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+        if (!tx) {
+                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
+                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                 return -ENOMEM;
+        }
 
         tx->ack = 1;
         tx->callback = NULL;
-        addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
-        tx->tx_set_src(addr, tx, 0);
-        addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
-        tx->tx_set_dest(addr, tx, 0);
         cookie = tx->tx_submit(tx);
 
         cpu = get_cpu();
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 45e7b4666c7b..dff38accc5c1 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -159,20 +159,6 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
         return device->common.chancnt;
 }
 
-static void ioat_set_src(dma_addr_t addr,
-                         struct dma_async_tx_descriptor *tx,
-                         int index)
-{
-        tx_to_ioat_desc(tx)->src = addr;
-}
-
-static void ioat_set_dest(dma_addr_t addr,
-                          struct dma_async_tx_descriptor *tx,
-                          int index)
-{
-        tx_to_ioat_desc(tx)->dst = addr;
-}
-
 /**
  * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
  *        descriptors to hw
@@ -415,8 +401,6 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
 
         memset(desc, 0, sizeof(*desc));
         dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
-        desc_sw->async_tx.tx_set_src = ioat_set_src;
-        desc_sw->async_tx.tx_set_dest = ioat_set_dest;
         switch (ioat_chan->device->version) {
         case IOAT_VER_1_2:
                 desc_sw->async_tx.tx_submit = ioat1_tx_submit;
@@ -714,8 +698,10 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
 
 static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
                                                 struct dma_chan *chan,
+                                                dma_addr_t dma_dest,
+                                                dma_addr_t dma_src,
                                                 size_t len,
-                                                int int_en)
+                                                unsigned long flags)
 {
         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
         struct ioat_desc_sw *new;
@@ -726,6 +712,8 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 
         if (new) {
                 new->len = len;
+                new->dst = dma_dest;
+                new->src = dma_src;
                 return &new->async_tx;
         } else
                 return NULL;
@@ -733,8 +721,10 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 
 static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
                                                 struct dma_chan *chan,
+                                                dma_addr_t dma_dest,
+                                                dma_addr_t dma_src,
                                                 size_t len,
-                                                int int_en)
+                                                unsigned long flags)
 {
         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
         struct ioat_desc_sw *new;
@@ -749,6 +739,8 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 
         if (new) {
                 new->len = len;
+                new->dst = dma_dest;
+                new->src = dma_src;
                 return &new->async_tx;
         } else
                 return NULL;
@@ -1045,7 +1037,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
         u8 *dest;
         struct dma_chan *dma_chan;
         struct dma_async_tx_descriptor *tx;
-        dma_addr_t addr;
+        dma_addr_t dma_dest, dma_src;
         dma_cookie_t cookie;
         int err = 0;
 
@@ -1073,7 +1065,12 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
                 goto out;
         }
 
-        tx = device->common.device_prep_dma_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
+        dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
+                                 DMA_TO_DEVICE);
+        dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
+                                  DMA_FROM_DEVICE);
+        tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
+                                                   IOAT_TEST_SIZE, 0);
         if (!tx) {
                 dev_err(&device->pdev->dev,
                         "Self-test prep failed, disabling\n");
@@ -1082,12 +1079,6 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
         }
 
         async_tx_ack(tx);
-        addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
-                              DMA_TO_DEVICE);
-        tx->tx_set_src(addr, tx, 0);
-        addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
-                              DMA_FROM_DEVICE);
-        tx->tx_set_dest(addr, tx, 0);
         tx->callback = ioat_dma_test_callback;
         tx->callback_param = (void *)0x8086;
         cookie = tx->tx_submit(tx);
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index e5c62b75f36f..3986d54492bd 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -284,7 +284,7 @@ iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
                                         int slots_per_op)
 {
         struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
-        struct list_head chain = LIST_HEAD_INIT(chain);
+        LIST_HEAD(chain);
         int slots_found, retry = 0;
 
         /* start search from the last allocated descrtiptor
@@ -443,17 +443,6 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
         return cookie;
 }
 
-static void
-iop_adma_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
-                  int index)
-{
-        struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-        struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
-
-        /* to do: support transfers lengths > IOP_ADMA_MAX_BYTE_COUNT */
-        iop_desc_set_dest_addr(sw_desc->group_head, iop_chan, addr);
-}
-
 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
 
@@ -486,7 +475,6 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 
                 dma_async_tx_descriptor_init(&slot->async_tx, chan);
                 slot->async_tx.tx_submit = iop_adma_tx_submit;
-                slot->async_tx.tx_set_dest = iop_adma_set_dest;
                 INIT_LIST_HEAD(&slot->chain_node);
                 INIT_LIST_HEAD(&slot->slot_node);
                 INIT_LIST_HEAD(&slot->async_tx.tx_list);
@@ -547,18 +535,9 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan)
         return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
-static void
-iop_adma_memcpy_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
-                        int index)
-{
-        struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-        struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
-        iop_desc_set_memcpy_src_addr(grp_start, addr);
-}
-
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
+iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+                         dma_addr_t dma_src, size_t len, unsigned long flags)
 {
         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
         struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -576,11 +555,12 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
         if (sw_desc) {
                 grp_start = sw_desc->group_head;
-                iop_desc_init_memcpy(grp_start, int_en);
+                iop_desc_init_memcpy(grp_start, flags);
                 iop_desc_set_byte_count(grp_start, iop_chan, len);
+                iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
+                iop_desc_set_memcpy_src_addr(grp_start, dma_src);
                 sw_desc->unmap_src_cnt = 1;
                 sw_desc->unmap_len = len;
-                sw_desc->async_tx.tx_set_src = iop_adma_memcpy_set_src;
         }
         spin_unlock_bh(&iop_chan->lock);
 
@@ -588,8 +568,8 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
 }
 
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
-                         int int_en)
+iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
+                         int value, size_t len, unsigned long flags)
 {
         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
         struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -607,9 +587,10 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
         if (sw_desc) {
                 grp_start = sw_desc->group_head;
-                iop_desc_init_memset(grp_start, int_en);
+                iop_desc_init_memset(grp_start, flags);
                 iop_desc_set_byte_count(grp_start, iop_chan, len);
                 iop_desc_set_block_fill_val(grp_start, value);
+                iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
                 sw_desc->unmap_src_cnt = 1;
                 sw_desc->unmap_len = len;
         }
@@ -618,19 +599,10 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
         return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
-static void
-iop_adma_xor_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
-                     int index)
-{
-        struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-        struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
-        iop_desc_set_xor_src_addr(grp_start, index, addr);
-}
-
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,
-                      int int_en)
+iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
+                      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
+                      unsigned long flags)
 {
         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
         struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -641,39 +613,32 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,
         BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
 
         dev_dbg(iop_chan->device->common.dev,
-                "%s src_cnt: %d len: %u int_en: %d\n",
-                __FUNCTION__, src_cnt, len, int_en);
+                "%s src_cnt: %d len: %u flags: %lx\n",
+                __FUNCTION__, src_cnt, len, flags);
 
         spin_lock_bh(&iop_chan->lock);
         slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
         if (sw_desc) {
                 grp_start = sw_desc->group_head;
-                iop_desc_init_xor(grp_start, src_cnt, int_en);
+                iop_desc_init_xor(grp_start, src_cnt, flags);
                 iop_desc_set_byte_count(grp_start, iop_chan, len);
+                iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
                 sw_desc->unmap_src_cnt = src_cnt;
                 sw_desc->unmap_len = len;
-                sw_desc->async_tx.tx_set_src = iop_adma_xor_set_src;
+                while (src_cnt--)
+                        iop_desc_set_xor_src_addr(grp_start, src_cnt,
+                                                  dma_src[src_cnt]);
         }
         spin_unlock_bh(&iop_chan->lock);
 
         return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
-static void
-iop_adma_xor_zero_sum_set_src(dma_addr_t addr,
-                              struct dma_async_tx_descriptor *tx,
-                              int index)
-{
-        struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-        struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
-        iop_desc_set_zero_sum_src_addr(grp_start, index, addr);
-}
-
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,
-                           size_t len, u32 *result, int int_en)
+iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
+                           unsigned int src_cnt, size_t len, u32 *result,
+                           unsigned long flags)
 {
         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
         struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -690,14 +655,16 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,
         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
         if (sw_desc) {
                 grp_start = sw_desc->group_head;
-                iop_desc_init_zero_sum(grp_start, src_cnt, int_en);
+                iop_desc_init_zero_sum(grp_start, src_cnt, flags);
                 iop_desc_set_zero_sum_byte_count(grp_start, len);
                 grp_start->xor_check_result = result;
                 pr_debug("\t%s: grp_start->xor_check_result: %p\n",
                         __FUNCTION__, grp_start->xor_check_result);
                 sw_desc->unmap_src_cnt = src_cnt;
                 sw_desc->unmap_len = len;
-                sw_desc->async_tx.tx_set_src = iop_adma_xor_zero_sum_set_src;
+                while (src_cnt--)
+                        iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
+                                                       dma_src[src_cnt]);
         }
         spin_unlock_bh(&iop_chan->lock);
 
@@ -882,13 +849,12 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
                 goto out;
         }
 
-        tx = iop_adma_prep_dma_memcpy(dma_chan, IOP_ADMA_TEST_SIZE, 1);
         dest_dma = dma_map_single(dma_chan->device->dev, dest,
                                 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
-        iop_adma_set_dest(dest_dma, tx, 0);
         src_dma = dma_map_single(dma_chan->device->dev, src,
                                 IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
-        iop_adma_memcpy_set_src(src_dma, tx, 0);
+        tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
+                                      IOP_ADMA_TEST_SIZE, 1);
 
         cookie = iop_adma_tx_submit(tx);
         iop_adma_issue_pending(dma_chan);
@@ -929,6 +895,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
         struct page *dest;
         struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
         struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
+        dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
         dma_addr_t dma_addr, dest_dma;
         struct dma_async_tx_descriptor *tx;
         struct dma_chan *dma_chan;
@@ -981,17 +948,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
         }
 
         /* test xor */
-        tx = iop_adma_prep_dma_xor(dma_chan, IOP_ADMA_NUM_SRC_TEST,
-                PAGE_SIZE, 1);
         dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
                                 PAGE_SIZE, DMA_FROM_DEVICE);
-        iop_adma_set_dest(dest_dma, tx, 0);
-
-        for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
-                dma_addr = dma_map_page(dma_chan->device->dev, xor_srcs[i], 0,
-                        PAGE_SIZE, DMA_TO_DEVICE);
-                iop_adma_xor_set_src(dma_addr, tx, i);
-        }
+        for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
+                dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+                                           0, PAGE_SIZE, DMA_TO_DEVICE);
+        tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
+                                   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, 1);
 
         cookie = iop_adma_tx_submit(tx);
         iop_adma_issue_pending(dma_chan);
@@ -1032,13 +995,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 
         zero_sum_result = 1;
 
-        tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
-                PAGE_SIZE, &zero_sum_result, 1);
-        for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
-                dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
-                        0, PAGE_SIZE, DMA_TO_DEVICE);
-                iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
-        }
+        for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
+                dma_srcs[i] = dma_map_page(dma_chan->device->dev,
+                                           zero_sum_srcs[i], 0, PAGE_SIZE,
+                                           DMA_TO_DEVICE);
+        tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
+                                        IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+                                        &zero_sum_result, 1);
 
         cookie = iop_adma_tx_submit(tx);
         iop_adma_issue_pending(dma_chan);
@@ -1060,10 +1023,9 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
         }
 
         /* test memset */
-        tx = iop_adma_prep_dma_memset(dma_chan, 0, PAGE_SIZE, 1);
         dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
                                 PAGE_SIZE, DMA_FROM_DEVICE);
-        iop_adma_set_dest(dma_addr, tx, 0);
+        tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, 1);
 
         cookie = iop_adma_tx_submit(tx);
         iop_adma_issue_pending(dma_chan);
@@ -1089,13 +1051,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 
         /* test for non-zero parity sum */
         zero_sum_result = 0;
-        tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
-                PAGE_SIZE, &zero_sum_result, 1);
-        for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
-                dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
-                        0, PAGE_SIZE, DMA_TO_DEVICE);
-                iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
-        }
+        for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
+                dma_srcs[i] = dma_map_page(dma_chan->device->dev,
+                                           zero_sum_srcs[i], 0, PAGE_SIZE,
+                                           DMA_TO_DEVICE);
+        tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
+                                        IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+                                        &zero_sum_result, 1);
 
         cookie = iop_adma_tx_submit(tx);
         iop_adma_issue_pending(dma_chan);