author		Dan Williams <dan.j.williams@intel.com>	2008-12-08 15:46:00 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2008-12-08 15:46:00 -0500
commit		a06d568f7c5e40e34ea64881842deb8f4382babf (patch)
tree		15b38b4652705b7c58bd89052c81ab91ca94cc4a
parent		b0b42b16ff2b90f17bc1a4308366c9beba4b276e (diff)
async_xor: dma_map destination DMA_BIDIRECTIONAL
Mapping the destination multiple times is a misuse of the dma-api.
Since the destination may be reused as a source, ensure that it is only
mapped once and that it is mapped bidirectionally.  This appears to add
ugliness on the unmap side in that it always reads back the destination
address from the descriptor, but gcc can determine that dma_unmap is a
nop and not emit the code that calculates its arguments.

Cc: <stable@kernel.org>
Cc: Saeed Bishara <saeed@marvell.com>
Acked-by: Yuri Tikhonov <yur@emcraft.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
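
As background for the hunks below, here is a minimal kernel-style sketch
of the mapping discipline the patch adopts.  The function map_xor_pages()
and its parameter list are hypothetical stand-ins for the do_async_xor()
context; only dma_map_page(), dma_mapping_error(), and the
dma_data_direction flags are the real dma-api:

	#include <linux/dma-mapping.h>

	/*
	 * Sketch only: map an xor destination exactly once and
	 * bidirectionally, so that it may legally double as a source.
	 */
	static int map_xor_pages(struct device *dev, struct page *dest,
				 struct page **src_list, int src_cnt,
				 unsigned int offset, size_t len,
				 dma_addr_t *dma_src)
	{
		dma_addr_t dma_dest;
		int i;

		/* the device may read back dest, so not DMA_FROM_DEVICE */
		dma_dest = dma_map_page(dev, dest, offset, len,
					DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_dest))
			return -ENOMEM;

		for (i = 0; i < src_cnt; i++) {
			/* reuse the mapping rather than map dest twice */
			if (src_list[i] == dest) {
				dma_src[i] = dma_dest;
				continue;
			}
			dma_src[i] = dma_map_page(dev, src_list[i], offset,
						  len, DMA_TO_DEVICE);
		}
		return 0;
	}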
-rw-r--r--	crypto/async_tx/async_xor.c	| 11
-rw-r--r--	drivers/dma/iop-adma.c		| 16
-rw-r--r--	drivers/dma/mv_xor.c		| 15
3 files changed, 34 insertions(+), 8 deletions(-)
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index c029d3eb9ef0..595b78672b36 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -53,10 +53,17 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	int xor_src_cnt;
 	dma_addr_t dma_dest;
 
-	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_FROM_DEVICE);
-	for (i = 0; i < src_cnt; i++)
+	/* map the dest bidirectional in case it is re-used as a source */
+	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
+	for (i = 0; i < src_cnt; i++) {
+		/* only map the dest once */
+		if (unlikely(src_list[i] == dest)) {
+			dma_src[i] = dma_dest;
+			continue;
+		}
 		dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
 					  len, DMA_TO_DEVICE);
+	}
 
 	while (src_cnt) {
 		async_flags = flags;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index c7a9306d951d..6be317262200 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -85,18 +85,28 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 		enum dma_ctrl_flags flags = desc->async_tx.flags;
 		u32 src_cnt;
 		dma_addr_t addr;
+		dma_addr_t dest;
 
+		src_cnt = unmap->unmap_src_cnt;
+		dest = iop_desc_get_dest_addr(unmap, iop_chan);
 		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-			addr = iop_desc_get_dest_addr(unmap, iop_chan);
-			dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+			enum dma_data_direction dir;
+
+			if (src_cnt > 1) /* is xor? */
+				dir = DMA_BIDIRECTIONAL;
+			else
+				dir = DMA_FROM_DEVICE;
+
+			dma_unmap_page(dev, dest, len, dir);
 		}
 
 		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-			src_cnt = unmap->unmap_src_cnt;
 			while (src_cnt--) {
 				addr = iop_desc_get_src_addr(unmap,
 							     iop_chan,
 							     src_cnt);
+				if (addr == dest)
+					continue;
 				dma_unmap_page(dev, addr, len,
 					       DMA_TO_DEVICE);
 			}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 0328da020a10..bcda17426411 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -311,17 +311,26 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
 		enum dma_ctrl_flags flags = desc->async_tx.flags;
 		u32 src_cnt;
 		dma_addr_t addr;
+		dma_addr_t dest;
 
+		src_cnt = unmap->unmap_src_cnt;
+		dest = mv_desc_get_dest_addr(unmap);
 		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-			addr = mv_desc_get_dest_addr(unmap);
-			dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+			enum dma_data_direction dir;
+
+			if (src_cnt > 1) /* is xor ? */
+				dir = DMA_BIDIRECTIONAL;
+			else
+				dir = DMA_FROM_DEVICE;
+			dma_unmap_page(dev, dest, len, dir);
 		}
 
 		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-			src_cnt = unmap->unmap_src_cnt;
 			while (src_cnt--) {
 				addr = mv_desc_get_src_addr(unmap,
 							    src_cnt);
+				if (addr == dest)
+					continue;
 				dma_unmap_page(dev, addr, len,
 					       DMA_TO_DEVICE);
 			}
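
Both drivers now follow the same unmap pattern, restated below outside
of any driver.  struct my_desc, get_dest_addr(), and get_src_addr() are
hypothetical stand-ins for the descriptor and its
iop_desc_get_*/mv_desc_get_* accessors.  Note that dest is computed
unconditionally; on configurations where dma_unmap_page() expands to a
no-op the compiler can discard the dead computation, which is the "nop"
argument from the changelog:

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>

	/* sketch only; my_desc and the accessors are hypothetical */
	static void unmap_xor_desc(struct device *dev, struct my_desc *desc,
				   enum dma_ctrl_flags flags, size_t len)
	{
		u32 src_cnt = desc->unmap_src_cnt;
		dma_addr_t dest = get_dest_addr(desc);

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			/* more than one source means xor, and an xor dest
			 * was mapped DMA_BIDIRECTIONAL at submit time */
			dma_unmap_page(dev, dest, len,
				       src_cnt > 1 ? DMA_BIDIRECTIONAL :
						     DMA_FROM_DEVICE);

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
			while (src_cnt--) {
				dma_addr_t addr = get_src_addr(desc, src_cnt);

				/* an aliased dest was unmapped once above */
				if (addr == dest)
					continue;
				dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
			}
	}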