author	Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>	2013-10-18 13:35:32 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2013-11-14 14:04:38 -0500
commit	54f8d501e842879143e867e70996574a54d1e130 (patch)
tree	1fcd65a5152d330167f5eefba5cc5d514ec91da1 /drivers/dma/iop-adma.c
parent	6f57fd0578dff23a4bd16118f0cb4201bcec91f1 (diff)
dmaengine: remove DMA unmap from drivers
Remove support for DMA unmapping from drivers as it is no longer needed
(DMA core code is now handling it).

Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
[djbw: fix up chan2parent() unused warning in drivers/dma/dw/core.c]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
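For reference, the core-side mechanism that takes over this work is the
reference-counted unmap data introduced earlier in this series: each
descriptor may carry a struct dmaengine_unmap_data, and
dma_descriptor_unmap() releases it on completion. A simplified sketch of
that core teardown (abridged; the handling of bidirectional addresses and
the mempool free are omitted, and it is not the verbatim implementation in
drivers/dma/dmaengine.c):

	/*
	 * Sketch of the generic unmap that replaces the per-driver
	 * helpers deleted below; illustration only.
	 */
	static void dmaengine_unmap_sketch(struct dmaengine_unmap_data *unmap)
	{
		struct device *dev = unmap->dev;
		int cnt, i;

		cnt = unmap->to_cnt;	/* sources were mapped DMA_TO_DEVICE */
		for (i = 0; i < cnt; i++)
			dma_unmap_page(dev, unmap->addr[i], unmap->len,
				       DMA_TO_DEVICE);

		cnt += unmap->from_cnt;	/* destinations, DMA_FROM_DEVICE */
		for (; i < cnt; i++)
			dma_unmap_page(dev, unmap->addr[i], unmap->len,
				       DMA_FROM_DEVICE);
	}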
Diffstat (limited to 'drivers/dma/iop-adma.c')
-rw-r--r--	drivers/dma/iop-adma.c	96
1 file changed, 2 insertions(+), 94 deletions(-)
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 8f6e426590eb..173e26ff18f8 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -61,80 +61,6 @@ static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
 	}
 }
 
-static void
-iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
-{
-	struct dma_async_tx_descriptor *tx = &desc->async_tx;
-	struct iop_adma_desc_slot *unmap = desc->group_head;
-	struct device *dev = &iop_chan->device->pdev->dev;
-	u32 len = unmap->unmap_len;
-	enum dma_ctrl_flags flags = tx->flags;
-	u32 src_cnt;
-	dma_addr_t addr;
-	dma_addr_t dest;
-
-	src_cnt = unmap->unmap_src_cnt;
-	dest = iop_desc_get_dest_addr(unmap, iop_chan);
-	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-		enum dma_data_direction dir;
-
-		if (src_cnt > 1) /* is xor? */
-			dir = DMA_BIDIRECTIONAL;
-		else
-			dir = DMA_FROM_DEVICE;
-
-		dma_unmap_page(dev, dest, len, dir);
-	}
-
-	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		while (src_cnt--) {
-			addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt);
-			if (addr == dest)
-				continue;
-			dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
-		}
-	}
-	desc->group_head = NULL;
-}
-
-static void
-iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
-{
-	struct dma_async_tx_descriptor *tx = &desc->async_tx;
-	struct iop_adma_desc_slot *unmap = desc->group_head;
-	struct device *dev = &iop_chan->device->pdev->dev;
-	u32 len = unmap->unmap_len;
-	enum dma_ctrl_flags flags = tx->flags;
-	u32 src_cnt = unmap->unmap_src_cnt;
-	dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan);
-	dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan);
-	int i;
-
-	if (tx->flags & DMA_PREP_CONTINUE)
-		src_cnt -= 3;
-
-	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) {
-		dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL);
-		dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL);
-	}
-
-	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		dma_addr_t addr;
-
-		for (i = 0; i < src_cnt; i++) {
-			addr = iop_desc_get_src_addr(unmap, iop_chan, i);
-			dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
-		}
-		if (desc->pq_check_result) {
-			dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE);
-			dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE);
-		}
-	}
-
-	desc->group_head = NULL;
-}
-
-
 static dma_cookie_t
 iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 	struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
@@ -153,15 +79,8 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 			tx->callback(tx->callback_param);
 
 		dma_descriptor_unmap(tx);
-		/* unmap dma addresses
-		 * (unmap_single vs unmap_page?)
-		 */
-		if (desc->group_head && desc->unmap_len) {
-			if (iop_desc_is_pq(desc))
-				iop_desc_unmap_pq(iop_chan, desc);
-			else
-				iop_desc_unmap(iop_chan, desc);
-		}
+		if (desc->group_head)
+			desc->group_head = NULL;
 	}
 
 	/* run dependent operations */
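Note: after this hunk the completion path relies entirely on the
dma_descriptor_unmap(tx) call above. As of this series that helper is a
small inline in include/linux/dmaengine.h; its shape is roughly the
following sketch (assuming the tx->unmap field from the parent patches,
not quoted verbatim):

	static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
	{
		if (tx->unmap) {
			dmaengine_unmap_put(tx->unmap);	/* kref_put; unmap runs at zero */
			tx->unmap = NULL;
		}
	}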
@@ -592,7 +511,6 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
 		iop_desc_init_interrupt(grp_start, iop_chan);
-		grp_start->unmap_len = 0;
 		sw_desc->async_tx.flags = flags;
 	}
 	spin_unlock_bh(&iop_chan->lock);
@@ -624,8 +542,6 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
-		sw_desc->unmap_src_cnt = 1;
-		sw_desc->unmap_len = len;
 		sw_desc->async_tx.flags = flags;
 	}
 	spin_unlock_bh(&iop_chan->lock);
@@ -658,8 +574,6 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 		iop_desc_init_xor(grp_start, src_cnt, flags);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
-		sw_desc->unmap_src_cnt = src_cnt;
-		sw_desc->unmap_len = len;
 		sw_desc->async_tx.flags = flags;
 		while (src_cnt--)
 			iop_desc_set_xor_src_addr(grp_start, src_cnt,
@@ -695,8 +609,6 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
 		grp_start->xor_check_result = result;
 		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
 			__func__, grp_start->xor_check_result);
-		sw_desc->unmap_src_cnt = src_cnt;
-		sw_desc->unmap_len = len;
 		sw_desc->async_tx.flags = flags;
 		while (src_cnt--)
 			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
@@ -749,8 +661,6 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 			dst[0] = dst[1] & 0x7;
 
 		iop_desc_set_pq_addr(g, dst);
-		sw_desc->unmap_src_cnt = src_cnt;
-		sw_desc->unmap_len = len;
 		sw_desc->async_tx.flags = flags;
 		for (i = 0; i < src_cnt; i++)
 			iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);
@@ -805,8 +715,6 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 		g->pq_check_result = pqres;
 		pr_debug("\t%s: g->pq_check_result: %p\n",
 			__func__, g->pq_check_result);
-		sw_desc->unmap_src_cnt = src_cnt+2;
-		sw_desc->unmap_len = len;
 		sw_desc->async_tx.flags = flags;
 		while (src_cnt--)
 			iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
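With the prep routines above no longer recording unmap_src_cnt/unmap_len,
the bookkeeping lives with whoever maps the buffers. A hypothetical
client-side sketch, assuming the dmaengine_get_unmap_data()/dma_set_unmap()
helpers from this series (the function name and error handling here are
illustrative only):

	static dma_cookie_t memcpy_with_unmap_sketch(struct dma_chan *chan,
			struct page *dst_pg, unsigned int dst_off,
			struct page *src_pg, unsigned int src_off, size_t len)
	{
		struct device *dev = chan->device->dev;
		struct dmaengine_unmap_data *unmap;
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie = -ENOMEM;

		unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
		if (!unmap)
			return cookie;

		unmap->len = len;
		unmap->addr[0] = dma_map_page(dev, src_pg, src_off, len,
					      DMA_TO_DEVICE);
		unmap->to_cnt = 1;	/* slot 0 is a source */
		unmap->addr[1] = dma_map_page(dev, dst_pg, dst_off, len,
					      DMA_FROM_DEVICE);
		unmap->from_cnt = 1;	/* slot 1 is a destination */

		tx = chan->device->device_prep_dma_memcpy(chan, unmap->addr[1],
							  unmap->addr[0], len, 0);
		if (tx) {
			dma_set_unmap(tx, unmap);	/* descriptor takes a reference */
			cookie = dmaengine_submit(tx);
		}
		dmaengine_unmap_put(unmap);	/* drop the local reference */

		return cookie;
	}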