author    Dan Williams <dan.j.williams@intel.com>  2008-02-02 21:49:58 -0500
committer Dan Williams <dan.j.williams@intel.com>  2008-02-06 12:12:18 -0500
commit    d4c56f97ff21df405d0cebe11f49e3c3c79662b5 (patch)
tree      e6b0de433d7c985982ac12815998242a786d87b2 /drivers/dma/iop-adma.c
parent    0036731c88fdb5bf4f04a796a30b5e445fc57f54 (diff)
async_tx: replace 'int_en' with operation preparation flags
Pass a full set of flags to drivers' per-operation 'prep' routines.
Currently the only flag passed is DMA_PREP_INTERRUPT.  The expectation is
that arch-specific async_tx_find_channel() implementations can exploit this
capability to find the best channel for an operation.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Shannon Nelson <shannon.nelson@intel.com>
Reviewed-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
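On the driver side the conversion is mechanical: each descriptor-init helper
now receives the whole flags word and carves the interrupt-enable bit out of
it, instead of taking a dedicated int_en argument.  A minimal sketch of the
pattern follows; 'example_desc' and its field are made-up stand-ins for the
real iop hardware descriptor layouts, not the actual iop-adma.h definitions:

	#include <linux/dmaengine.h>	/* enum dma_ctrl_flags: DMA_PREP_INTERRUPT */

	/* Hypothetical hardware descriptor, standing in for the iop
	 * descriptor formats touched by this patch. */
	struct example_desc {
		unsigned int int_en:1;	/* raise a completion interrupt */
	};

	/* Before: example_desc_init_memcpy(struct example_desc *d, int int_en)
	 * After: the helper takes the full prep flags and tests the bit itself. */
	static inline void
	example_desc_init_memcpy(struct example_desc *desc, unsigned long flags)
	{
		desc->int_en = !!(flags & DMA_PREP_INTERRUPT);
	}

Routing the whole word through also lets future flags reach the descriptor-init
helpers without another signature change, which is what the note above about
async_tx_find_channel() implementations is pointing at.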
Diffstat (limited to 'drivers/dma/iop-adma.c')
-rw-r--r--	drivers/dma/iop-adma.c	|  20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index eda841c60690..3986d54492bd 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -537,7 +537,7 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan)
 
 static struct dma_async_tx_descriptor *
 iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
-	dma_addr_t dma_src, size_t len, int int_en)
+	dma_addr_t dma_src, size_t len, unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -555,7 +555,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_memcpy(grp_start, int_en);
+		iop_desc_init_memcpy(grp_start, flags);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
@@ -569,7 +569,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 
 static struct dma_async_tx_descriptor *
 iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
-	int value, size_t len, int int_en)
+	int value, size_t len, unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -587,7 +587,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_memset(grp_start, int_en);
+		iop_desc_init_memset(grp_start, flags);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
 		iop_desc_set_block_fill_val(grp_start, value);
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
@@ -602,7 +602,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
 static struct dma_async_tx_descriptor *
 iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 	dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
-	int int_en)
+	unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -613,15 +613,15 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 	BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
 
 	dev_dbg(iop_chan->device->common.dev,
-		"%s src_cnt: %d len: %u int_en: %d\n",
-		__FUNCTION__, src_cnt, len, int_en);
+		"%s src_cnt: %d len: %u flags: %lx\n",
+		__FUNCTION__, src_cnt, len, flags);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_xor(grp_start, src_cnt, int_en);
+		iop_desc_init_xor(grp_start, src_cnt, flags);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		sw_desc->unmap_src_cnt = src_cnt;
@@ -638,7 +638,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 static struct dma_async_tx_descriptor *
 iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
 	unsigned int src_cnt, size_t len, u32 *result,
-	int int_en)
+	unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -655,7 +655,7 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_zero_sum(grp_start, src_cnt, int_en);
+		iop_desc_init_zero_sum(grp_start, src_cnt, flags);
 		iop_desc_set_zero_sum_byte_count(grp_start, len);
 		grp_start->xor_check_result = result;
 		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
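
For context, this is how the flag reaches these prep routines from above once
the patch is applied.  A sketch of the caller side, assuming 'chan' is a
channel already acquired from the dmaengine core and 'dma_dest'/'dma_src' are
already DMA-mapped:

	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;

	/* Ask the driver to raise an interrupt when this copy completes. */
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_PREP_INTERRUPT);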