author		Lior Amsalem <alior@marvell.com>	2014-08-27 09:52:53 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2014-09-23 10:47:01 -0400
commit		ba87d13721b6fe4a2479871dc4f77c5bd8db3c32
tree		cfbfdfead1eaac70a3cc9509e5339010e7ca094c	/drivers/dma/mv_xor.c
parent		0e7488ed01235fdd24ce7f0295dbbea0d45311bb
dma: mv_xor: Reduce interrupts by enabling EOD only when needed
This commit unmasks the end-of-chain interrupt and removes the
end-of-descriptor command setting on all transactions, except those
explicitly flagged with DMA_PREP_INTERRUPT.

This allows an interrupt to be raised only on chain completion, instead
of on each descriptor completion, which reduces the interrupt count.

Signed-off-by: Lior Amsalem <alior@marvell.com>
Signed-off-by: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
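For context, here is a minimal, hypothetical client-side sketch of the
pattern this commit optimizes for: a dmaengine consumer submits a chain
of XOR descriptors and sets DMA_PREP_INTERRUPT only on the last one, so
only end-of-chain raises an interrupt. The helpers submit_xor_chain()
and xor_done() are illustrative names, not part of this patch; only the
dmaengine calls themselves are real API.

#include <linux/dmaengine.h>
#include <linux/completion.h>

/* Hypothetical completion callback, run once at end of chain. */
static void xor_done(void *param)
{
	complete(param);
}

/* Hypothetical helper: submit nr XOR operations as one chain and
 * sleep until the final (and only interrupting) descriptor is done. */
static int submit_xor_chain(struct dma_chan *chan, dma_addr_t *dest,
			    dma_addr_t (*srcs)[2], size_t len, int nr)
{
	struct dma_async_tx_descriptor *tx;
	DECLARE_COMPLETION_ONSTACK(done);
	int i;

	for (i = 0; i < nr; i++) {
		/* Request an interrupt only on the last descriptor;
		 * with this patch the earlier ones complete silently. */
		unsigned long flags = (i == nr - 1) ? DMA_PREP_INTERRUPT : 0;

		tx = chan->device->device_prep_dma_xor(chan, dest[i],
						       srcs[i], 2, len,
						       flags);
		if (!tx)
			return -ENOMEM;

		if (i == nr - 1) {
			tx->callback = xor_done;
			tx->callback_param = &done;
		}
		dmaengine_submit(tx);
	}

	dma_async_issue_pending(chan);
	wait_for_completion(&done);
	return 0;
}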
Diffstat (limited to 'drivers/dma/mv_xor.c')
-rw-r--r--	drivers/dma/mv_xor.c	14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 4ee5bb194fd5..cbc90e5df7ff 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -46,13 +46,16 @@ static void mv_xor_issue_pending(struct dma_chan *chan);
 	((chan)->dmadev.dev)
 
 static void mv_desc_init(struct mv_xor_desc_slot *desc,
-			 dma_addr_t addr, u32 byte_count)
+			 dma_addr_t addr, u32 byte_count,
+			 enum dma_ctrl_flags flags)
 {
 	struct mv_xor_desc *hw_desc = desc->hw_desc;
 
 	hw_desc->status = XOR_DESC_DMA_OWNED;
 	hw_desc->phy_next_desc = 0;
-	hw_desc->desc_command = XOR_DESC_EOD_INT_EN;
+	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
+	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
+				XOR_DESC_EOD_INT_EN : 0;
 	hw_desc->phy_dest_addr = addr;
 	hw_desc->byte_count = byte_count;
 }
@@ -107,7 +110,10 @@ static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
 
 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
 {
-	u32 val = ~(XOR_INT_END_OF_DESC << (chan->idx * 16));
+	u32 val;
+
+	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
+	val = ~(val << (chan->idx * 16));
 	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
 	writel_relaxed(val, XOR_INTR_CAUSE(chan));
 }
@@ -510,7 +516,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 	if (sw_desc) {
 		sw_desc->type = DMA_XOR;
 		sw_desc->async_tx.flags = flags;
-		mv_desc_init(sw_desc, dest, len);
+		mv_desc_init(sw_desc, dest, len, flags);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
 		while (src_cnt--)
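A note on the mv_xor_device_clear_eoc_cause() hunk above: the XOR
channels share one interrupt cause register, with a 16-bit field per
channel, so the driver shifts the event bits by chan->idx * 16 and
writes the complement, placing zero bits only in that channel's field
(presumably write-zero-to-acknowledge semantics). Below is a standalone
sketch of that arithmetic; the BIT(0)..BIT(2) constants are assumptions
modeled on mv_xor.h, not taken from this diff.

#include <stdint.h>
#include <stdio.h>

/* Assumed bit values, modeled on mv_xor.h; illustrative only. */
#define XOR_INT_END_OF_DESC	(1u << 0)
#define XOR_INT_END_OF_CHAIN	(1u << 1)
#define XOR_INT_STOPPED		(1u << 2)

int main(void)
{
	uint32_t bits = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN |
			XOR_INT_STOPPED;
	unsigned idx;

	for (idx = 0; idx < 2; idx++) {
		uint32_t val = ~(bits << (idx * 16));

		/* chan 0 -> 0xfffffff8, chan 1 -> 0xfff8ffff: the zero
		 * bits land only in that channel's 16-bit field. */
		printf("chan %u: val 0x%08x\n", idx, val);
	}
	return 0;
}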