author		Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:42:53 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:42:53 -0400
commit		83544ae9f3991bfc7d5e0fe9a3008cd05a8d57b7 (patch)
tree		bc4b28c2e5bdae01a2c8a250176fcdac6ae7a8ce
parent		9308add6ea4fedeba37b0d7c4630a542bd34f214 (diff)
dmaengine, async_tx: support alignment checks
Some engines have transfer size and address alignment restrictions. Add a per-operation alignment property to struct dma_device that the async routines and dmatest can use to check alignment capabilities.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--	crypto/async_tx/async_memcpy.c	2
-rw-r--r--	crypto/async_tx/async_memset.c	2
-rw-r--r--	crypto/async_tx/async_pq.c	6
-rw-r--r--	crypto/async_tx/async_xor.c	5
-rw-r--r--	drivers/dma/dmatest.c	14
-rw-r--r--	include/linux/dmaengine.h	44
6 files changed, 67 insertions(+), 6 deletions(-)
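The core mechanism is a shift-based check: a device advertises each restriction as a power-of-two shift, and an operation qualifies for offload only when both offsets and the length are multiples of (1 << shift). Below is a standalone userspace sketch of the dmaengine_check_align() helper this patch adds to include/linux/dmaengine.h; the function name and the test values in main() are illustrative only.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* align is a shift: offsets and length must be multiples of (1 << align) */
static bool check_align(unsigned char align, size_t off1, size_t off2,
			size_t len)
{
	size_t mask;

	if (!align)
		return true;	/* 0 means no restriction */
	mask = ((size_t)1 << align) - 1;
	/* a set low bit in either offset or the length breaks alignment */
	return !(mask & (off1 | off2 | len));
}

int main(void)
{
	printf("%d\n", check_align(2, 0, 8, 64));	/* 1: all multiples of 4 */
	printf("%d\n", check_align(2, 1, 8, 64));	/* 0: offset 1 is not */
	printf("%d\n", check_align(0, 1, 3, 7));	/* 1: no restriction */
	return 0;
}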
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index b38cbb3fd527..0ec1fb69d4ea 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -50,7 +50,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 
-	if (device) {
+	if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
 		dma_addr_t dma_dest, dma_src;
 		unsigned long dma_prep_flags = 0;
 
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index a374784e3329..58e4a8752aee 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -47,7 +47,7 @@ async_memset(struct page *dest, int val, unsigned int offset, size_t len,
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 
-	if (device) {
+	if (device && is_dma_fill_aligned(device, offset, 0, len)) {
 		dma_addr_t dma_dest;
 		unsigned long dma_prep_flags = 0;
 
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index a25e290c39fb..b88db6d1dc65 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -211,7 +211,8 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 
 	if (dma_src && device &&
 	    (src_cnt <= dma_maxpq(device, 0) ||
-	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0)) {
+	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
+	    is_dma_pq_aligned(device, offset, 0, len)) {
 		/* run the p+q asynchronously */
 		pr_debug("%s: (async) disks: %d len: %zu\n",
 			 __func__, disks, len);
@@ -274,7 +275,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
 		dma_src = (dma_addr_t *) blocks;
 
-	if (dma_src && device && disks <= dma_maxpq(device, 0)) {
+	if (dma_src && device && disks <= dma_maxpq(device, 0) &&
+	    is_dma_pq_aligned(device, offset, 0, len)) {
 		struct device *dev = device->dev;
 		dma_addr_t *pq = &dma_src[disks-2];
 		int i;
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index db279872ef3d..b459a9034aac 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -193,7 +193,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
 		dma_src = (dma_addr_t *) src_list;
 
-	if (dma_src && chan) {
+	if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {
 		/* run the xor asynchronously */
 		pr_debug("%s (async): len: %zu\n", __func__, len);
 
@@ -265,7 +265,8 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
 		dma_src = (dma_addr_t *) src_list;
 
-	if (dma_src && device && src_cnt <= device->max_xor) {
+	if (dma_src && device && src_cnt <= device->max_xor &&
+	    is_dma_xor_aligned(device, offset, 0, len)) {
 		unsigned long dma_prep_flags = 0;
 		int i;
 
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 58e49e41c7a3..a3722a7384b5 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -288,6 +288,7 @@ static int dmatest_func(void *data)
 		dma_addr_t dma_dsts[dst_cnt];
 		struct completion cmp;
 		unsigned long tmo = msecs_to_jiffies(3000);
+		u8 align = 0;
 
 		total_tests++;
 
@@ -295,6 +296,18 @@ static int dmatest_func(void *data)
 		src_off = dmatest_random() % (test_buf_size - len + 1);
 		dst_off = dmatest_random() % (test_buf_size - len + 1);
 
+		/* honor alignment restrictions */
+		if (thread->type == DMA_MEMCPY)
+			align = dev->copy_align;
+		else if (thread->type == DMA_XOR)
+			align = dev->xor_align;
+		else if (thread->type == DMA_PQ)
+			align = dev->pq_align;
+
+		len = (len >> align) << align;
+		src_off = (src_off >> align) << align;
+		dst_off = (dst_off >> align) << align;
+
 		dmatest_init_srcs(thread->srcs, src_off, len);
 		dmatest_init_dsts(thread->dsts, dst_off, len);
 
@@ -311,6 +324,7 @@ static int dmatest_func(void *data)
 					   DMA_BIDIRECTIONAL);
 		}
 
+
 		if (thread->type == DMA_MEMCPY)
 			tx = dev->device_prep_dma_memcpy(chan,
 							 dma_dsts[0] + dst_off,
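The dmatest change above rounds the randomly chosen length and offsets down to the device's granularity with a shift pair: (x >> align) << align clears the low align bits, i.e. rounds x down to the nearest multiple of (1 << align). A minimal userspace illustration of that idiom, with hypothetical values:

#include <stddef.h>
#include <stdio.h>

static size_t round_down_pow2(size_t x, unsigned char align)
{
	/* shifting right then left by align zeroes the low bits */
	return (x >> align) << align;
}

int main(void)
{
	printf("%zu\n", round_down_pow2(1003, 3));	/* 1000: nearest multiple of 8 */
	printf("%zu\n", round_down_pow2(1000, 3));	/* 1000: already aligned */
	printf("%zu\n", round_down_pow2(7, 3));		/* 0 */
	return 0;
}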
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index db23fd583f98..835b9c7bf1c2 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -245,6 +245,10 @@ struct dma_async_tx_descriptor {
  * @cap_mask: one or more dma_capability flags
  * @max_xor: maximum number of xor sources, 0 if no capability
  * @max_pq: maximum number of PQ sources and PQ-continue capability
+ * @copy_align: alignment shift for memcpy operations
+ * @xor_align: alignment shift for xor operations
+ * @pq_align: alignment shift for pq operations
+ * @fill_align: alignment shift for memset operations
  * @dev_id: unique device ID
  * @dev: struct device reference for dma mapping api
  * @device_alloc_chan_resources: allocate resources and return the
@@ -271,6 +275,10 @@ struct dma_device {
 	dma_cap_mask_t cap_mask;
 	unsigned short max_xor;
 	unsigned short max_pq;
+	u8 copy_align;
+	u8 xor_align;
+	u8 pq_align;
+	u8 fill_align;
 	#define DMA_HAS_PQ_CONTINUE (1 << 15)
 
 	int dev_id;
@@ -314,6 +322,42 @@ struct dma_device {
 	void (*device_issue_pending)(struct dma_chan *chan);
 };
 
+static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
+{
+	size_t mask;
+
+	if (!align)
+		return true;
+	mask = (1 << align) - 1;
+	if (mask & (off1 | off2 | len))
+		return false;
+	return true;
+}
+
+static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
+				       size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->copy_align, off1, off2, len);
+}
+
+static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
+				      size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->xor_align, off1, off2, len);
+}
+
+static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
+				     size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->pq_align, off1, off2, len);
+}
+
+static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
+				       size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->fill_align, off1, off2, len);
+}
+
 static inline void
 dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
 {
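For driver authors, adopting the new fields is a one-line change per capability; a field left at zero (the usual kzalloc default) keeps the old no-restriction behavior, so existing drivers are unaffected. A hedged sketch of a hypothetical driver's probe path follows; the function name and shift values are made up, while the *_align members and dma_async_device_register() are the real API:

#include <linux/dmaengine.h>

static int hypothetical_engine_probe(struct dma_device *dma)
{
	/* this engine needs 16-byte aligned offsets and lengths for copies */
	dma->copy_align = 4;	/* shift: 1 << 4 == 16 bytes */
	dma->fill_align = 4;
	dma->xor_align = 0;	/* xor path has no restriction */

	return dma_async_device_register(dma);
}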