author		Lior Amsalem <alior@marvell.com>	2014-08-27 09:52:50 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2014-09-23 10:47:00 -0400
commit		3e4f52e2da9f66ba9c19b9266fa9ffcaee2f3ecc (patch)
tree		c5f923dd0382720ec0b3121661333dfe91929996
parent		b8291ddeed581e57327d715d29ffc501b9d48c5f (diff)
dma: mv_xor: Simplify the DMA_MEMCPY operation
A memory copy operation can be expressed as an XOR operation with one
source. This commit removes code duplication in the driver by reusing
the XOR operation for the MEMCPY. As an added benefit, we can now put
MEMCPY and XOR descriptors on the same chain, which improves
performance.

Signed-off-by: Lior Amsalem <alior@marvell.com>
Signed-off-by: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
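To see why the rewrite is safe, here is a minimal userspace C model of the
engine's XOR semantics (illustrative only: xor_model and the test harness are
assumptions for this sketch, not driver code):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	/*
	 * Model of the XOR engine: dest[i] = src[0][i] ^ ... ^ src[cnt-1][i].
	 * Starting the accumulator at 0 (the XOR identity element) means a
	 * single source yields dest[i] = src[0][i], i.e. a plain memory copy.
	 */
	static void xor_model(uint8_t *dest, const uint8_t *const *src,
			      unsigned int src_cnt, size_t len)
	{
		for (size_t i = 0; i < len; i++) {
			uint8_t acc = 0;

			for (unsigned int s = 0; s < src_cnt; s++)
				acc ^= src[s][i];
			dest[i] = acc;
		}
	}

	int main(void)
	{
		uint8_t src[4] = { 0xde, 0xad, 0xbe, 0xef };
		uint8_t dst[4] = { 0 };
		const uint8_t *srcs[] = { src };

		/* One source: the XOR degenerates to a copy. */
		xor_model(dst, srcs, 1, sizeof(src));
		assert(memcmp(dst, src, sizeof(src)) == 0);
		return 0;
	}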
-rw-r--r--	drivers/dma/mv_xor.c	77
1 file changed, 12 insertions(+), 65 deletions(-)
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 7c3876838032..1e43f187231e 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -82,13 +82,6 @@ static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
 	hw_desc->phy_dest_addr = addr;
 }
 
-static int mv_chan_memset_slot_count(size_t len)
-{
-	return 1;
-}
-
-#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
-
 static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
 				 int index, dma_addr_t addr)
 {
@@ -144,17 +137,6 @@ static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
 	writel_relaxed(val, XOR_INTR_CAUSE(chan));
 }
 
-static int mv_can_chain(struct mv_xor_desc_slot *desc)
-{
-	struct mv_xor_desc_slot *chain_old_tail = list_entry(
-		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);
-
-	if (chain_old_tail->type != desc->type)
-		return 0;
-
-	return 1;
-}
-
 static void mv_set_mode(struct mv_xor_chan *chan,
 			enum dma_transaction_type type)
 {
@@ -236,8 +218,6 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
 {
 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
 		__func__, __LINE__, sw_desc);
-	if (sw_desc->type != mv_chan->current_type)
-		mv_set_mode(mv_chan, sw_desc->type);
 
 	/* set the hardware chain */
 	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
@@ -492,9 +472,6 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
 		list_splice_init(&grp_start->tx_list,
 				 &old_chain_tail->chain_node);
 
-		if (!mv_can_chain(grp_start))
-			goto submit_done;
-
 		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
 			&old_chain_tail->async_tx.phys);
 
@@ -516,7 +493,6 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (new_hw_chain)
 		mv_xor_start_new_chain(mv_chan, grp_start);
 
-submit_done:
 	spin_unlock_bh(&mv_chan->lock);
 
 	return cookie;
@@ -573,45 +549,6 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 }
 
 static struct dma_async_tx_descriptor *
-mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
-		       size_t len, unsigned long flags)
-{
-	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
-	struct mv_xor_desc_slot *sw_desc, *grp_start;
-	int slot_cnt;
-
-	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s dest: %pad src %pad len: %u flags: %ld\n",
-		__func__, &dest, &src, len, flags);
-	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
-		return NULL;
-
-	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
-
-	spin_lock_bh(&mv_chan->lock);
-	slot_cnt = mv_chan_memcpy_slot_count(len);
-	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
-	if (sw_desc) {
-		sw_desc->type = DMA_MEMCPY;
-		sw_desc->async_tx.flags = flags;
-		grp_start = sw_desc->group_head;
-		mv_desc_init(grp_start, flags);
-		mv_desc_set_byte_count(grp_start, len);
-		mv_desc_set_dest_addr(sw_desc->group_head, dest);
-		mv_desc_set_src_addr(grp_start, 0, src);
-		sw_desc->unmap_src_cnt = 1;
-		sw_desc->unmap_len = len;
-	}
-	spin_unlock_bh(&mv_chan->lock);
-
-	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s sw_desc %p async_tx %p\n",
-		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
-
-	return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-static struct dma_async_tx_descriptor *
 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 		    unsigned int src_cnt, size_t len, unsigned long flags)
 {
@@ -636,7 +573,6 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 	sw_desc->async_tx.flags = flags;
 	grp_start = sw_desc->group_head;
 	mv_desc_init(grp_start, flags);
-	/* the byte count field is the same as in memcpy desc*/
 	mv_desc_set_byte_count(grp_start, len);
 	mv_desc_set_dest_addr(sw_desc->group_head, dest);
 	sw_desc->unmap_src_cnt = src_cnt;
@@ -651,6 +587,17 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
+static struct dma_async_tx_descriptor *
+mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+		       size_t len, unsigned long flags)
+{
+	/*
+	 * A MEMCPY operation is identical to an XOR operation with only
+	 * a single source address.
+	 */
+	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
+}
+
 static void mv_xor_free_chan_resources(struct dma_chan *chan)
 {
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
@@ -1071,7 +1018,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 
 	mv_chan_unmask_interrupts(mv_chan);
 
-	mv_set_mode(mv_chan, DMA_MEMCPY);
+	mv_set_mode(mv_chan, DMA_XOR);
 
 	spin_lock_init(&mv_chan->lock);
 	INIT_LIST_HEAD(&mv_chan->chain);