author    Dan Williams <dan.j.williams@intel.com>  2008-02-02 21:49:58 -0500
committer Dan Williams <dan.j.williams@intel.com>  2008-02-06 12:12:18 -0500
commit    d4c56f97ff21df405d0cebe11f49e3c3c79662b5 (patch)
tree      e6b0de433d7c985982ac12815998242a786d87b2
parent    0036731c88fdb5bf4f04a796a30b5e445fc57f54 (diff)
async_tx: replace 'int_en' with operation preparation flags
Pass a full set of flags to drivers' per-operation 'prep' routines. Currently the only flag passed is DMA_PREP_INTERRUPT. The expectation is that arch-specific async_tx_find_channel() implementations can exploit this capability to find the best channel for an operation.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Shannon Nelson <shannon.nelson@intel.com>
Reviewed-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
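To illustrate the new calling convention, here is a minimal caller-side sketch (a hypothetical helper, simply mirroring the async_memcpy change in this patch): the consumer derives the prep flags from whether it wants a completion callback and hands them to the driver's prep routine.

static struct dma_async_tx_descriptor *
example_prep_copy(struct dma_device *device, struct dma_chan *chan,
		  dma_addr_t dma_dest, dma_addr_t dma_src, size_t len,
		  dma_async_tx_callback cb_fn)
{
	/* request a completion interrupt only when a callback is wanted */
	unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;

	/* DMA_PREP_INTERRUPT is the only prep flag defined by this patch */
	return device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
					      len, dma_prep_flags);
}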
-rw-r--r--  crypto/async_tx/async_memcpy.c          |  3
-rw-r--r--  crypto/async_tx/async_memset.c          |  3
-rw-r--r--  crypto/async_tx/async_xor.c             | 10
-rw-r--r--  drivers/dma/ioat_dma.c                  |  4
-rw-r--r--  drivers/dma/iop-adma.c                  | 20
-rw-r--r--  include/asm-arm/arch-iop13xx/adma.h     | 18
-rw-r--r--  include/asm-arm/hardware/iop3xx-adma.h  | 30
-rw-r--r--  include/linux/dmaengine.h               | 17
8 files changed, 62 insertions, 43 deletions
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index faca0bc52068..25dcf33bbc2d 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -52,6 +52,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 
 	if (device) {
 		dma_addr_t dma_dest, dma_src;
+		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
 		dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
 					DMA_FROM_DEVICE);
@@ -60,7 +61,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 				  DMA_TO_DEVICE);
 
 		tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
-						    len, cb_fn != NULL);
+						    len, dma_prep_flags);
 	}
 
 	if (tx) {
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 0c94851cfd37..8e98ab0cd37c 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -52,12 +52,13 @@ async_memset(struct page *dest, int val, unsigned int offset,
 
 	if (device) {
 		dma_addr_t dma_dest;
+		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
 		dma_dest = dma_map_page(device->dev, dest, offset, len,
 					DMA_FROM_DEVICE);
 
 		tx = device->device_prep_dma_memset(chan, dma_dest, val, len,
-						    cb_fn != NULL);
+						    dma_prep_flags);
 	}
 
 	if (tx) {
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 12cba1a4205b..68d2fe4465d8 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -45,6 +45,7 @@ do_async_xor(struct dma_device *device,
 	dma_addr_t *dma_src = (dma_addr_t *) src_list;
 	struct dma_async_tx_descriptor *tx;
 	int i;
+	unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
 	pr_debug("%s: len: %zu\n", __FUNCTION__, len);
 
@@ -60,7 +61,7 @@ do_async_xor(struct dma_device *device,
 	 * in case they can not provide a descriptor
 	 */
 	tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len,
-					 cb_fn != NULL);
+					 dma_prep_flags);
 	if (!tx) {
 		if (depend_tx)
 			dma_wait_for_async_tx(depend_tx);
@@ -68,7 +69,7 @@ do_async_xor(struct dma_device *device,
 		while (!tx)
 			tx = device->device_prep_dma_xor(chan, dma_dest,
 							 dma_src, src_cnt, len,
-							 cb_fn != NULL);
+							 dma_prep_flags);
 	}
 
 	async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
@@ -268,6 +269,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 
 	if (device) {
 		dma_addr_t *dma_src = (dma_addr_t *) src_list;
+		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 		int i;
 
 		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
@@ -278,7 +280,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 
 		tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
 						      len, result,
-						      cb_fn != NULL);
+						      dma_prep_flags);
 		if (!tx) {
 			if (depend_tx)
 				dma_wait_for_async_tx(depend_tx);
@@ -286,7 +288,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 			while (!tx)
 				tx = device->device_prep_dma_zero_sum(chan,
 					dma_src, src_cnt, len, result,
-					cb_fn != NULL);
+					dma_prep_flags);
 		}
 
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 5bcfc55a2776..dff38accc5c1 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -701,7 +701,7 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 						dma_addr_t dma_dest,
 						dma_addr_t dma_src,
 						size_t len,
-						int int_en)
+						unsigned long flags)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 	struct ioat_desc_sw *new;
@@ -724,7 +724,7 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 						dma_addr_t dma_dest,
 						dma_addr_t dma_src,
 						size_t len,
-						int int_en)
+						unsigned long flags)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 	struct ioat_desc_sw *new;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index eda841c60690..3986d54492bd 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -537,7 +537,7 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan)
 
 static struct dma_async_tx_descriptor *
 iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
-			 dma_addr_t dma_src, size_t len, int int_en)
+			 dma_addr_t dma_src, size_t len, unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -555,7 +555,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_memcpy(grp_start, int_en);
+		iop_desc_init_memcpy(grp_start, flags);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
@@ -569,7 +569,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 
 static struct dma_async_tx_descriptor *
 iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
-			 int value, size_t len, int int_en)
+			 int value, size_t len, unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -587,7 +587,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_memset(grp_start, int_en);
+		iop_desc_init_memset(grp_start, flags);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
 		iop_desc_set_block_fill_val(grp_start, value);
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
@@ -602,7 +602,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
 static struct dma_async_tx_descriptor *
 iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 		      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
-		      int int_en)
+		      unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -613,15 +613,15 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 	BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
 
 	dev_dbg(iop_chan->device->common.dev,
-		"%s src_cnt: %d len: %u int_en: %d\n",
-		__FUNCTION__, src_cnt, len, int_en);
+		"%s src_cnt: %d len: %u flags: %lx\n",
+		__FUNCTION__, src_cnt, len, flags);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_xor(grp_start, src_cnt, int_en);
+		iop_desc_init_xor(grp_start, src_cnt, flags);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		sw_desc->unmap_src_cnt = src_cnt;
@@ -638,7 +638,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 static struct dma_async_tx_descriptor *
 iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
 			   unsigned int src_cnt, size_t len, u32 *result,
-			   int int_en)
+			   unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -655,7 +655,7 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_zero_sum(grp_start, src_cnt, int_en);
+		iop_desc_init_zero_sum(grp_start, src_cnt, flags);
 		iop_desc_set_zero_sum_byte_count(grp_start, len);
 		grp_start->xor_check_result = result;
 		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
diff --git a/include/asm-arm/arch-iop13xx/adma.h b/include/asm-arm/arch-iop13xx/adma.h
index 04006c1c5fd7..efd9a5eb1008 100644
--- a/include/asm-arm/arch-iop13xx/adma.h
+++ b/include/asm-arm/arch-iop13xx/adma.h
@@ -247,7 +247,7 @@ static inline u32 iop_desc_get_src_count(struct iop_adma_desc_slot *desc,
 }
 
 static inline void
-iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
+iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
 {
 	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
 	union {
@@ -257,13 +257,13 @@ iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
 
 	u_desc_ctrl.value = 0;
 	u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 	hw_desc->crc_addr = 0;
 }
 
 static inline void
-iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
+iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
 {
 	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
 	union {
@@ -274,14 +274,15 @@ iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
 	u_desc_ctrl.value = 0;
 	u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
 	u_desc_ctrl.field.block_fill_en = 1;
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 	hw_desc->crc_addr = 0;
 }
 
 /* to do: support buffers larger than ADMA_MAX_BYTE_COUNT */
 static inline void
-iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
+		  unsigned long flags)
 {
 	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
 	union {
@@ -292,7 +293,7 @@ iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
 	u_desc_ctrl.value = 0;
 	u_desc_ctrl.field.src_select = src_cnt - 1;
 	u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 	hw_desc->crc_addr = 0;
 
@@ -301,7 +302,8 @@ iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
 
 /* to do: support buffers larger than ADMA_MAX_BYTE_COUNT */
 static inline int
-iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
+		       unsigned long flags)
 {
 	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
 	union {
@@ -314,7 +316,7 @@ iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
 	u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
 	u_desc_ctrl.field.zero_result = 1;
 	u_desc_ctrl.field.status_write_back_en = 1;
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 	hw_desc->crc_addr = 0;
 
diff --git a/include/asm-arm/hardware/iop3xx-adma.h b/include/asm-arm/hardware/iop3xx-adma.h
index 10834b54f681..5c529e6a5e3b 100644
--- a/include/asm-arm/hardware/iop3xx-adma.h
+++ b/include/asm-arm/hardware/iop3xx-adma.h
@@ -414,7 +414,7 @@ static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
 }
 
 static inline void
-iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
+iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
 {
 	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
 	union {
@@ -425,14 +425,14 @@ iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
 	u_desc_ctrl.value = 0;
 	u_desc_ctrl.field.mem_to_mem_en = 1;
 	u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 	hw_desc->upper_pci_src_addr = 0;
 	hw_desc->crc_addr = 0;
 }
 
 static inline void
-iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
+iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
 {
 	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
 	union {
@@ -443,12 +443,13 @@ iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
 	u_desc_ctrl.value = 0;
 	u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
 	u_desc_ctrl.field.dest_write_en = 1;
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 }
 
 static inline u32
-iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, int int_en)
+iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
+		     unsigned long flags)
 {
 	int i, shift;
 	u32 edcr;
@@ -509,21 +510,23 @@ iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, int int_en)
 
 	u_desc_ctrl.field.dest_write_en = 1;
 	u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 
 	return u_desc_ctrl.value;
 }
 
 static inline void
-iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
+		  unsigned long flags)
 {
-	iop3xx_desc_init_xor(desc->hw_desc, src_cnt, int_en);
+	iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
 }
 
 /* return the number of operations */
 static inline int
-iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
+		       unsigned long flags)
 {
 	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
 	struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
@@ -538,10 +541,10 @@ iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
 	for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
 		i += slots_per_op, j++) {
 		iter = iop_hw_desc_slot_idx(hw_desc, i);
-		u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, int_en);
+		u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
 		u_desc_ctrl.field.dest_write_en = 0;
 		u_desc_ctrl.field.zero_result_en = 1;
-		u_desc_ctrl.field.int_en = int_en;
+		u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 		iter->desc_ctrl = u_desc_ctrl.value;
 
 		/* for the subsequent descriptors preserve the store queue
@@ -559,7 +562,8 @@ iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
 }
 
 static inline void
-iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
+		       unsigned long flags)
 {
 	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
 	union {
@@ -591,7 +595,7 @@ iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
 	}
 
 	u_desc_ctrl.field.dest_write_en = 0;
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 }
 
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index b0864f5b729d..acbb364674ff 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -95,6 +95,15 @@ enum dma_transaction_type {
 #define DMA_TX_TYPE_END (DMA_INTERRUPT + 1)
 
 /**
+ * enum dma_prep_flags - DMA flags to augment operation preparation
+ * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
+ *  this transaction
+ */
+enum dma_prep_flags {
+	DMA_PREP_INTERRUPT = (1 << 0),
+};
+
+/**
  * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
  * See linux/cpumask.h
  */
@@ -274,16 +283,16 @@ struct dma_device {
 
 	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
 		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
-		size_t len, int int_en);
+		size_t len, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
 		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
-		unsigned int src_cnt, size_t len, int int_en);
+		unsigned int src_cnt, size_t len, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
 		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
-		size_t len, u32 *result, int int_en);
+		size_t len, u32 *result, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
 		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
-		int int_en);
+		unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
 		struct dma_chan *chan);
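On the driver side, a prep routine only needs to test DMA_PREP_INTERRUPT when it programs its completion-interrupt control bit, as the iop-adma descriptor helpers above do. A minimal sketch, assuming a hypothetical hardware descriptor type with an int_en field:

/* example_hw_desc is a made-up stand-in for a driver's descriptor layout */
static void example_desc_init(struct example_hw_desc *hw_desc,
			      unsigned long flags)
{
	/* DMA_PREP_INTERRUPT is bit 0, so this yields 0 or 1 */
	hw_desc->int_en = (flags & DMA_PREP_INTERRUPT) ? 1 : 0;
}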
289 298