path: root/crypto
author		Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:42:53 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:42:53 -0400
commit		83544ae9f3991bfc7d5e0fe9a3008cd05a8d57b7 (patch)
tree		bc4b28c2e5bdae01a2c8a250176fcdac6ae7a8ce /crypto
parent		9308add6ea4fedeba37b0d7c4630a542bd34f214 (diff)
dmaengine, async_tx: support alignment checks
Some engines have transfer size and address alignment restrictions.  Add a
per-operation alignment property to struct dma_device that the async routines
and dmatest can use to check alignment capabilities.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
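Note: the dmaengine.h side of this change is not part of the diff shown here
(the diffstat is limited to 'crypto'), so the following is only a minimal
sketch of how such a per-operation alignment check could be implemented.  The
helper name dmaengine_check_align and the field copy_align are assumptions,
modelled on the is_dma_copy_aligned()/is_dma_fill_aligned()/
is_dma_pq_aligned()/is_dma_xor_aligned() calls that the hunks below introduce;
the alignment value is taken to be a power-of-2 exponent.

	/* Sketch (assumed names): alignment stored as a power-of-2 exponent in
	 * struct dma_device, 0 meaning "no restriction".  Both offsets and the
	 * transfer length must be multiples of (1 << align).
	 */
	static inline bool dmaengine_check_align(u8 align, size_t off1,
						 size_t off2, size_t len)
	{
		size_t mask;

		if (!align)
			return true;	/* engine has no alignment restriction */
		mask = (1 << align) - 1;
		if (mask & (off1 | off2 | len))
			return false;	/* at least one operand is misaligned */
		return true;
	}

	/* e.g. the check used by async_memcpy() below (sketch) */
	static inline bool is_dma_copy_aligned(struct dma_device *dev,
					       size_t off1, size_t off2,
					       size_t len)
	{
		return dmaengine_check_align(dev->copy_align, off1, off2, len);
	}

When the check fails, the async routines simply fall back to the existing
synchronous (CPU) path, as the unchanged else branches in the code below show.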
Diffstat (limited to 'crypto')
-rw-r--r--	crypto/async_tx/async_memcpy.c	2
-rw-r--r--	crypto/async_tx/async_memset.c	2
-rw-r--r--	crypto/async_tx/async_pq.c	6
-rw-r--r--	crypto/async_tx/async_xor.c	5
4 files changed, 9 insertions, 6 deletions
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index b38cbb3fd527..0ec1fb69d4ea 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -50,7 +50,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 
-	if (device) {
+	if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
 		dma_addr_t dma_dest, dma_src;
 		unsigned long dma_prep_flags = 0;
 
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index a374784e3329..58e4a8752aee 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -47,7 +47,7 @@ async_memset(struct page *dest, int val, unsigned int offset, size_t len,
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 
-	if (device) {
+	if (device && is_dma_fill_aligned(device, offset, 0, len)) {
 		dma_addr_t dma_dest;
 		unsigned long dma_prep_flags = 0;
 
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index a25e290c39fb..b88db6d1dc65 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -211,7 +211,8 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 
 	if (dma_src && device &&
 	    (src_cnt <= dma_maxpq(device, 0) ||
-	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0)) {
+	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
+	    is_dma_pq_aligned(device, offset, 0, len)) {
 		/* run the p+q asynchronously */
 		pr_debug("%s: (async) disks: %d len: %zu\n",
 			 __func__, disks, len);
@@ -274,7 +275,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
 		dma_src = (dma_addr_t *) blocks;
 
-	if (dma_src && device && disks <= dma_maxpq(device, 0)) {
+	if (dma_src && device && disks <= dma_maxpq(device, 0) &&
+	    is_dma_pq_aligned(device, offset, 0, len)) {
 		struct device *dev = device->dev;
 		dma_addr_t *pq = &dma_src[disks-2];
 		int i;
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index db279872ef3d..b459a9034aac 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -193,7 +193,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
 		dma_src = (dma_addr_t *) src_list;
 
-	if (dma_src && chan) {
+	if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {
 		/* run the xor asynchronously */
 		pr_debug("%s (async): len: %zu\n", __func__, len);
 
@@ -265,7 +265,8 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
 		dma_src = (dma_addr_t *) src_list;
 
-	if (dma_src && device && src_cnt <= device->max_xor) {
+	if (dma_src && device && src_cnt <= device->max_xor &&
+	    is_dma_xor_aligned(device, offset, 0, len)) {
 		unsigned long dma_prep_flags = 0;
 		int i;
 
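For completeness, a driver with alignment restrictions would advertise them on
its struct dma_device before registration so that the checks above can see
them.  The field names below (copy_align, fill_align, xor_align, pq_align) are
assumptions following the per-operation naming pattern of the helpers; they are
not part of this crypto-only diff.

	/* Hypothetical driver setup (assumed field names): advertise 4-byte
	 * (2^2) alignment for memcpy/memset and 16-byte (2^4) alignment for
	 * xor/pq before calling dma_async_device_register().
	 */
	dma_dev->copy_align = 2;
	dma_dev->fill_align = 2;
	dma_dev->xor_align  = 4;
	dma_dev->pq_align   = 4;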