diff options
author | Dan Williams <dan.j.williams@intel.com> | 2013-10-18 13:35:28 -0400 |
---|---|---|
committer | Dan Williams <dan.j.williams@intel.com> | 2013-11-14 14:01:31 -0500 |
commit | 3bbdd49872931b8c4282aeb1cab5af7cce2cfb0d (patch) | |
tree | 0f38667f3e871c57c963a15c442620b5d2075b04 | |
parent | 173e86b2809234cb5f2a50e9a8c159b70e23da1c (diff) |
async_raid6_recov: convert to dmaengine_unmap_data
Use the generic unmap object to unmap dma buffers.
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Reported-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
[bzolnier: keep temporary dma_dest array in async_mult()]
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r-- | crypto/async_tx/async_raid6_recov.c | 69 |
1 file changed, 49 insertions, 20 deletions
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c index a9f08a6a582e..a3a72a784421 100644 --- a/crypto/async_tx/async_raid6_recov.c +++ b/crypto/async_tx/async_raid6_recov.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/dma-mapping.h> | 26 | #include <linux/dma-mapping.h> |
27 | #include <linux/raid/pq.h> | 27 | #include <linux/raid/pq.h> |
28 | #include <linux/async_tx.h> | 28 | #include <linux/async_tx.h> |
29 | #include <linux/dmaengine.h> | ||
29 | 30 | ||
30 | static struct dma_async_tx_descriptor * | 31 | static struct dma_async_tx_descriptor * |
31 | async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, | 32 | async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, |
@@ -34,35 +35,47 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, | |||
34 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, | 35 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, |
35 | &dest, 1, srcs, 2, len); | 36 | &dest, 1, srcs, 2, len); |
36 | struct dma_device *dma = chan ? chan->device : NULL; | 37 | struct dma_device *dma = chan ? chan->device : NULL; |
38 | struct dmaengine_unmap_data *unmap = NULL; | ||
37 | const u8 *amul, *bmul; | 39 | const u8 *amul, *bmul; |
38 | u8 ax, bx; | 40 | u8 ax, bx; |
39 | u8 *a, *b, *c; | 41 | u8 *a, *b, *c; |
40 | 42 | ||
41 | if (dma) { | 43 | if (dma) |
42 | dma_addr_t dma_dest[2]; | 44 | unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); |
43 | dma_addr_t dma_src[2]; | 45 | |
46 | if (unmap) { | ||
44 | struct device *dev = dma->dev; | 47 | struct device *dev = dma->dev; |
48 | dma_addr_t pq[2]; | ||
45 | struct dma_async_tx_descriptor *tx; | 49 | struct dma_async_tx_descriptor *tx; |
46 | enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; | 50 | enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP | |
51 | DMA_COMPL_SKIP_DEST_UNMAP | | ||
52 | DMA_PREP_PQ_DISABLE_P; | ||
47 | 53 | ||
48 | if (submit->flags & ASYNC_TX_FENCE) | 54 | if (submit->flags & ASYNC_TX_FENCE) |
49 | dma_flags |= DMA_PREP_FENCE; | 55 | dma_flags |= DMA_PREP_FENCE; |
50 | dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); | 56 | unmap->addr[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); |
51 | dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); | 57 | unmap->addr[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); |
52 | dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); | 58 | unmap->to_cnt = 2; |
53 | tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef, | 59 | |
60 | unmap->addr[2] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); | ||
61 | unmap->bidi_cnt = 1; | ||
62 | /* engine only looks at Q, but expects it to follow P */ | ||
63 | pq[1] = unmap->addr[2]; | ||
64 | |||
65 | unmap->len = len; | ||
66 | tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef, | ||
54 | len, dma_flags); | 67 | len, dma_flags); |
55 | if (tx) { | 68 | if (tx) { |
69 | dma_set_unmap(tx, unmap); | ||
56 | async_tx_submit(chan, tx, submit); | 70 | async_tx_submit(chan, tx, submit); |
71 | dmaengine_unmap_put(unmap); | ||
57 | return tx; | 72 | return tx; |
58 | } | 73 | } |
59 | 74 | ||
60 | /* could not get a descriptor, unmap and fall through to | 75 | /* could not get a descriptor, unmap and fall through to |
61 | * the synchronous path | 76 | * the synchronous path |
62 | */ | 77 | */ |
63 | dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); | 78 | dmaengine_unmap_put(unmap); |
64 | dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE); | ||
65 | dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE); | ||
66 | } | 79 | } |
67 | 80 | ||
68 | /* run the operation synchronously */ | 81 | /* run the operation synchronously */ |
@@ -89,23 +102,40 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, | |||
89 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, | 102 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, |
90 | &dest, 1, &src, 1, len); | 103 | &dest, 1, &src, 1, len); |
91 | struct dma_device *dma = chan ? chan->device : NULL; | 104 | struct dma_device *dma = chan ? chan->device : NULL; |
105 | struct dmaengine_unmap_data *unmap = NULL; | ||
92 | const u8 *qmul; /* Q multiplier table */ | 106 | const u8 *qmul; /* Q multiplier table */ |
93 | u8 *d, *s; | 107 | u8 *d, *s; |
94 | 108 | ||
95 | if (dma) { | 109 | if (dma) |
110 | unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); | ||
111 | |||
112 | if (unmap) { | ||
96 | dma_addr_t dma_dest[2]; | 113 | dma_addr_t dma_dest[2]; |
97 | dma_addr_t dma_src[1]; | ||
98 | struct device *dev = dma->dev; | 114 | struct device *dev = dma->dev; |
99 | struct dma_async_tx_descriptor *tx; | 115 | struct dma_async_tx_descriptor *tx; |
100 | enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; | 116 | enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP | |
117 | DMA_COMPL_SKIP_DEST_UNMAP | | ||
118 | DMA_PREP_PQ_DISABLE_P; | ||
101 | 119 | ||
102 | if (submit->flags & ASYNC_TX_FENCE) | 120 | if (submit->flags & ASYNC_TX_FENCE) |
103 | dma_flags |= DMA_PREP_FENCE; | 121 | dma_flags |= DMA_PREP_FENCE; |
104 | dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); | 122 | unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); |
105 | dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); | 123 | unmap->to_cnt++; |
106 | tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef, | 124 | unmap->addr[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); |
107 | len, dma_flags); | 125 | dma_dest[1] = unmap->addr[1]; |
126 | unmap->bidi_cnt++; | ||
127 | unmap->len = len; | ||
128 | |||
129 | /* this looks funny, but the engine looks for Q at | ||
130 | * dma_dest[1] and ignores dma_dest[0] as a dest | ||
131 | * due to DMA_PREP_PQ_DISABLE_P | ||
132 | */ | ||
133 | tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr, | ||
134 | 1, &coef, len, dma_flags); | ||
135 | |||
108 | if (tx) { | 136 | if (tx) { |
137 | dma_set_unmap(tx, unmap); | ||
138 | dmaengine_unmap_put(unmap); | ||
109 | async_tx_submit(chan, tx, submit); | 139 | async_tx_submit(chan, tx, submit); |
110 | return tx; | 140 | return tx; |
111 | } | 141 | } |
@@ -113,8 +143,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, | |||
113 | /* could not get a descriptor, unmap and fall through to | 143 | /* could not get a descriptor, unmap and fall through to |
114 | * the synchronous path | 144 | * the synchronous path |
115 | */ | 145 | */ |
116 | dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); | 146 | dmaengine_unmap_put(unmap); |
117 | dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE); | ||
118 | } | 147 | } |
119 | 148 | ||
120 | /* no channel available, or failed to allocate a descriptor, so | 149 | /* no channel available, or failed to allocate a descriptor, so |