commit 636bdeaa1243327501edfd2a597ed7443eb4239a
parent c4fe15541d0ef5cc8cc1ce43057663851f8fc387
tree   59b894f124e3664ea4a537d7c07c527abdb9c8da
author    Dan Williams <dan.j.williams@intel.com>  2008-04-17 23:17:26 -0400
committer Dan Williams <dan.j.williams@intel.com>  2008-04-17 16:25:54 -0400

dmaengine: ack to flags: make use of the unused bits in the 'ack' field
'ack' is currently a simple integer that flags whether a client is done
touching fields in the given descriptor. It is effectively just a single bit
of information. Converting it to a flags parameter frees the remaining bits to
control completion actions, like dma-unmap, and to capture results, like
xor-zero-sum == 0.
The changes are one of:
1/ convert all open-coded ->ack manipulations to use async_tx_ack
   and async_tx_test_ack
2/ set the ack bit at prep time where possible
3/ make drivers store the flags at prep time
4/ add a flags parameter to the device_prep_dma_interrupt prototype
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
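
For illustration only (not part of the patch): a minimal sketch of how a dmaengine client uses the converted interface. The channel setup and DMA mappings are assumed to exist already, and the function name is made up; it simply exercises DMA_CTRL_ACK, async_tx_ack() and async_tx_test_ack() as introduced above.

	#include <linux/dmaengine.h>

	/* hypothetical client helper, shown only to illustrate the new flags */
	static void example_client_copies(struct dma_chan *chan,
					  dma_addr_t dst, dma_addr_t src, size_t len)
	{
		struct dma_device *dev = chan->device;
		struct dma_async_tx_descriptor *tx;

		/* fire-and-forget: pre-ack so the driver may recycle the
		 * descriptor as soon as the copy completes
		 */
		tx = dev->device_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
		if (tx)
			tx->tx_submit(tx);

		/* client-managed: leave the ack bit clear, submit, attach any
		 * dependent operations, then release it with async_tx_ack()
		 */
		tx = dev->device_prep_dma_memcpy(chan, dst, src, len, 0);
		if (tx) {
			BUG_ON(async_tx_test_ack(tx));	/* prep left it un-acked */
			tx->tx_submit(tx);
			/* ... chain dependent operations here ... */
			async_tx_ack(tx);	/* driver may now reuse the descriptor */
		}
	}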
 crypto/async_tx/async_memcpy.c |  2
 crypto/async_tx/async_tx.c     |  9
 crypto/async_tx/async_xor.c    |  2
 drivers/dma/dmaengine.c        | 12
 drivers/dma/fsldma.c           | 10
 drivers/dma/ioat_dma.c         | 24
 drivers/dma/iop-adma.c         | 39
 include/linux/dmaengine.h      | 25
 8 files changed, 69 insertions(+), 54 deletions(-)
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 84caa4efc0d4..a5eda80e8427 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -77,7 +77,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 			/* if ack is already set then we cannot be sure
 			 * we are referring to the correct operation
 			 */
-			BUG_ON(depend_tx->ack);
+			BUG_ON(async_tx_test_ack(depend_tx));
 			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for depend_tx\n",
 					__func__);
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 69756164b61d..c6e772fc5ccd 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -446,7 +446,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
 	 * otherwise poll for completion
 	 */
 	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
-		intr_tx = device->device_prep_dma_interrupt(chan);
+		intr_tx = device->device_prep_dma_interrupt(chan, 0);
 	else
 		intr_tx = NULL;
 
@@ -515,7 +515,8 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 		 * 2/ dependencies are 1:1 i.e. two transactions can
 		 * not depend on the same parent
 		 */
-		BUG_ON(depend_tx->ack || depend_tx->next || tx->parent);
+		BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next ||
+		       tx->parent);
 
 		/* the lock prevents async_tx_run_dependencies from missing
 		 * the setting of ->next when ->parent != NULL
@@ -594,7 +595,7 @@ async_trigger_callback(enum async_tx_flags flags,
 		if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
 			device = NULL;
 
-		tx = device ? device->device_prep_dma_interrupt(chan) : NULL;
+		tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
 	} else
 		tx = NULL;
 
@@ -610,7 +611,7 @@ async_trigger_callback(enum async_tx_flags flags,
 		/* if ack is already set then we cannot be sure
 		 * we are referring to the correct operation
 		 */
-		BUG_ON(depend_tx->ack);
+		BUG_ON(async_tx_test_ack(depend_tx));
 		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 			panic("%s: DMA_ERROR waiting for depend_tx\n",
 				__func__);
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 1c445c7bdab7..3a0dddca5a10 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -191,7 +191,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 			/* if ack is already set then we cannot be sure
 			 * we are referring to the correct operation
 			 */
-			BUG_ON(depend_tx->ack);
+			BUG_ON(async_tx_test_ack(depend_tx));
 			if (dma_wait_for_async_tx(depend_tx) ==
 				DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for "
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index af6911a75dae..d6dc70fd7527 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -478,7 +478,8 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 
 	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
+					 DMA_CTRL_ACK);
 
 	if (!tx) {
 		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -486,7 +487,6 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 		return -ENOMEM;
 	}
 
-	tx->ack = 1;
 	tx->callback = NULL;
 	cookie = tx->tx_submit(tx);
 
@@ -524,7 +524,8 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 
 	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
+					 DMA_CTRL_ACK);
 
 	if (!tx) {
 		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -532,7 +533,6 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 		return -ENOMEM;
 	}
 
-	tx->ack = 1;
 	tx->callback = NULL;
 	cookie = tx->tx_submit(tx);
 
@@ -573,7 +573,8 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
 				DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
+					 DMA_CTRL_ACK);
 
 	if (!tx) {
 		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -581,7 +582,6 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 		return -ENOMEM;
 	}
 
-	tx->ack = 1;
 	tx->callback = NULL;
 	cookie = tx->tx_submit(tx);
 
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 95b36b7934a5..054eabffc185 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -412,7 +412,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *chan)
 }
 
 static struct dma_async_tx_descriptor *
-fsl_dma_prep_interrupt(struct dma_chan *chan)
+fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
 {
 	struct fsl_dma_chan *fsl_chan;
 	struct fsl_desc_sw *new;
@@ -429,7 +429,7 @@ fsl_dma_prep_interrupt(struct dma_chan *chan)
 	}
 
 	new->async_tx.cookie = -EBUSY;
-	new->async_tx.ack = 0;
+	new->async_tx.flags = flags;
 
 	/* Insert the link descriptor to the LD ring */
 	list_add_tail(&new->node, &new->async_tx.tx_list);
@@ -482,7 +482,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);
 
 		new->async_tx.cookie = 0;
-		new->async_tx.ack = 1;
+		async_tx_ack(&new->async_tx);
 
 		prev = new;
 		len -= copy;
@@ -493,7 +493,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 		list_add_tail(&new->node, &first->async_tx.tx_list);
 	} while (len);
 
-	new->async_tx.ack = 0; /* client is in control of this ack */
+	new->async_tx.flags = flags; /* client is in control of this ack */
 	new->async_tx.cookie = -EBUSY;
 
 	/* Set End-of-link to the last link descriptor of new list*/
@@ -874,7 +874,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
 	async_tx_ack(tx3);
 
 	/* Interrupt tx test */
-	tx1 = fsl_dma_prep_interrupt(chan);
+	tx1 = fsl_dma_prep_interrupt(chan, 0);
 	async_tx_ack(tx1);
 	cookie = fsl_dma_tx_submit(tx1);
 
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 1517fe4e2d14..318e8a22d814 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -212,14 +212,14 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	u32 copy;
 	size_t len;
 	dma_addr_t src, dst;
-	int orig_ack;
+	unsigned long orig_flags;
 	unsigned int desc_count = 0;
 
 	/* src and dest and len are stored in the initial descriptor */
 	len = first->len;
 	src = first->src;
 	dst = first->dst;
-	orig_ack = first->async_tx.ack;
+	orig_flags = first->async_tx.flags;
 	new = first;
 
 	spin_lock_bh(&ioat_chan->desc_lock);
@@ -228,7 +228,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	do {
 		copy = min_t(size_t, len, ioat_chan->xfercap);
 
-		new->async_tx.ack = 1;
+		async_tx_ack(&new->async_tx);
 
 		hw = new->hw;
 		hw->size = copy;
@@ -264,7 +264,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	new->tx_cnt = desc_count;
-	new->async_tx.ack = orig_ack; /* client is in control of this ack */
+	new->async_tx.flags = orig_flags; /* client is in control of this ack */
 
 	/* store the original values for use in later cleanup */
 	if (new != first) {
@@ -304,14 +304,14 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 	u32 copy;
 	size_t len;
 	dma_addr_t src, dst;
-	int orig_ack;
+	unsigned long orig_flags;
 	unsigned int desc_count = 0;
 
 	/* src and dest and len are stored in the initial descriptor */
 	len = first->len;
 	src = first->src;
 	dst = first->dst;
-	orig_ack = first->async_tx.ack;
+	orig_flags = first->async_tx.flags;
 	new = first;
 
 	/*
@@ -321,7 +321,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 	do {
 		copy = min_t(size_t, len, ioat_chan->xfercap);
 
-		new->async_tx.ack = 1;
+		async_tx_ack(&new->async_tx);
 
 		hw = new->hw;
 		hw->size = copy;
@@ -349,7 +349,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	new->tx_cnt = desc_count;
-	new->async_tx.ack = orig_ack; /* client is in control of this ack */
+	new->async_tx.flags = orig_flags; /* client is in control of this ack */
 
 	/* store the original values for use in later cleanup */
 	if (new != first) {
@@ -714,7 +714,7 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 		new->len = len;
 		new->dst = dma_dest;
 		new->src = dma_src;
-		new->async_tx.ack = 0;
+		new->async_tx.flags = flags;
 		return &new->async_tx;
 	} else
 		return NULL;
@@ -742,7 +742,7 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 		new->len = len;
 		new->dst = dma_dest;
 		new->src = dma_src;
-		new->async_tx.ack = 0;
+		new->async_tx.flags = flags;
 		return &new->async_tx;
 	} else
 		return NULL;
@@ -842,7 +842,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 			 * a completed entry, but not the last, so clean
 			 * up if the client is done with the descriptor
 			 */
-			if (desc->async_tx.ack) {
+			if (async_tx_test_ack(&desc->async_tx)) {
 				list_del(&desc->node);
 				list_add_tail(&desc->node,
 					      &ioat_chan->free_desc);
@@ -979,7 +979,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
 	desc->hw->size = 0;
 	desc->hw->src_addr = 0;
 	desc->hw->dst_addr = 0;
-	desc->async_tx.ack = 1;
+	async_tx_ack(&desc->async_tx);
 	switch (ioat_chan->device->version) {
 	case IOAT_VER_1_2:
 		desc->hw->next = 0;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 93252294f32b..762b729672e0 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -111,7 +111,7 @@ iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
 	/* the client is allowed to attach dependent operations
 	 * until 'ack' is set
 	 */
-	if (!desc->async_tx.ack)
+	if (!async_tx_test_ack(&desc->async_tx))
 		return 0;
 
 	/* leave the last descriptor in the chain
@@ -148,7 +148,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 			"this_desc: %#x next_desc: %#x ack: %d\n",
 			iter->async_tx.cookie, iter->idx, busy,
 			iter->async_tx.phys, iop_desc_get_next_desc(iter),
-			iter->async_tx.ack);
+			async_tx_test_ack(&iter->async_tx));
 		prefetch(_iter);
 		prefetch(&_iter->async_tx);
 
@@ -338,9 +338,7 @@ retry:
 
 			/* pre-ack all but the last descriptor */
 			if (num_slots != slots_per_op)
-				iter->async_tx.ack = 1;
-			else
-				iter->async_tx.ack = 0;
+				async_tx_ack(&iter->async_tx);
 
 			list_add_tail(&iter->chain_node, &chain);
 			alloc_tail = iter;
@@ -513,7 +511,7 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 }
 
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_interrupt(struct dma_chan *chan)
+iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -528,6 +526,7 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan)
 		grp_start = sw_desc->group_head;
 		iop_desc_init_interrupt(grp_start, iop_chan);
 		grp_start->unmap_len = 0;
+		sw_desc->async_tx.flags = flags;
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
@@ -560,6 +559,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
 		sw_desc->unmap_src_cnt = 1;
 		sw_desc->unmap_len = len;
+		sw_desc->async_tx.flags = flags;
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
@@ -592,6 +592,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		sw_desc->unmap_src_cnt = 1;
 		sw_desc->unmap_len = len;
+		sw_desc->async_tx.flags = flags;
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
@@ -625,6 +626,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
+		sw_desc->async_tx.flags = flags;
 		while (src_cnt--)
 			iop_desc_set_xor_src_addr(grp_start, src_cnt,
 						  dma_src[src_cnt]);
@@ -661,6 +663,7 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
 			__func__, grp_start->xor_check_result);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
+		sw_desc->async_tx.flags = flags;
 		while (src_cnt--)
 			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
 						       dma_src[src_cnt]);
@@ -847,11 +850,11 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
 	src_dma = dma_map_single(dma_chan->device->dev, src,
 				 IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
 	tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
-				      IOP_ADMA_TEST_SIZE, 1);
+				      IOP_ADMA_TEST_SIZE,
+				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
-	async_tx_ack(tx);
 	msleep(1);
 
 	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
@@ -947,11 +950,11 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
 					   0, PAGE_SIZE, DMA_TO_DEVICE);
 	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
-				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, 1);
+				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
+				   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
-	async_tx_ack(tx);
 	msleep(8);
 
 	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
@@ -994,11 +997,11 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 					   DMA_TO_DEVICE);
 	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
 					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
-					&zero_sum_result, 1);
+					&zero_sum_result,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
-	async_tx_ack(tx);
 	msleep(8);
 
 	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
@@ -1018,11 +1021,11 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	/* test memset */
 	dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
 				PAGE_SIZE, DMA_FROM_DEVICE);
-	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, 1);
+	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
+				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
-	async_tx_ack(tx);
 	msleep(8);
 
 	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
@@ -1050,11 +1053,11 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 					DMA_TO_DEVICE);
 	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
 					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
-					&zero_sum_result, 1);
+					&zero_sum_result,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
-	async_tx_ack(tx);
 	msleep(8);
 
 	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
@@ -1287,7 +1290,7 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
 		grp_start = sw_desc->group_head;
 
 		list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
-		sw_desc->async_tx.ack = 1;
+		async_tx_ack(&sw_desc->async_tx);
 		iop_desc_init_memcpy(grp_start, 0);
 		iop_desc_set_byte_count(grp_start, iop_chan, 0);
 		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
@@ -1343,7 +1346,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
 		list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
-		sw_desc->async_tx.ack = 1;
+		async_tx_ack(&sw_desc->async_tx);
 		iop_desc_init_null_xor(grp_start, 2, 0);
 		iop_desc_set_byte_count(grp_start, iop_chan, 0);
 		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index cd34df78c6aa..b4d84ed6187d 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -95,12 +95,17 @@ enum dma_transaction_type {
 #define DMA_TX_TYPE_END (DMA_INTERRUPT + 1)
 
 /**
- * enum dma_prep_flags - DMA flags to augment operation preparation
+ * enum dma_ctrl_flags - DMA flags to augment operation preparation,
+ *  control completion, and communicate status.
  * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
  *  this transaction
+ * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
+ *  acknowledges receipt, i.e. has has a chance to establish any
+ *  dependency chains
  */
-enum dma_prep_flags {
+enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
+	DMA_CTRL_ACK = (1 << 1),
 };
 
 /**
@@ -211,8 +216,8 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
- * @ack: the descriptor can not be reused until the client acknowledges
- *	receipt, i.e. has has a chance to establish any dependency chains
+ * @flags: flags to augment operation preparation, control completion, and
+ *	communicate status
 * @phys: physical address of the descriptor
 * @tx_list: driver common field for operations that require multiple
 *	descriptors
@@ -227,7 +232,7 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
 */
 struct dma_async_tx_descriptor {
 	dma_cookie_t cookie;
-	int ack;
+	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
 	dma_addr_t phys;
 	struct list_head tx_list;
 	struct dma_chan *chan;
@@ -290,7 +295,7 @@ struct dma_device {
 		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
 		unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
-		struct dma_chan *chan);
+		struct dma_chan *chan, unsigned long flags);
 
 	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
 			dma_cookie_t cookie, dma_cookie_t *last,
@@ -316,7 +321,13 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 static inline void
 async_tx_ack(struct dma_async_tx_descriptor *tx)
 {
-	tx->ack = 1;
+	tx->flags |= DMA_CTRL_ACK;
+}
+
+static inline int
+async_tx_test_ack(struct dma_async_tx_descriptor *tx)
+{
+	return tx->flags & DMA_CTRL_ACK;
 }
 
 #define first_dma_cap(mask) __first_dma_cap(&(mask))
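
The driver-side counterpart of items 1/ and 3/ above, again only a sketch with hypothetical foo_* names and types: a prep routine now records the caller's flags instead of zeroing a dedicated ack field, and the cleanup path tests the bit through the helper rather than reading ->ack directly.

	/* hypothetical driver fragment; foo_desc/foo_chan and the helpers are
	 * stand-ins for a real driver's descriptor bookkeeping
	 */
	static struct dma_async_tx_descriptor *
	foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			    size_t len, unsigned long flags)
	{
		struct foo_desc *new = foo_get_descriptor(chan);

		if (!new)
			return NULL;
		/* ... program the hardware descriptor for dest/src/len ... */
		new->async_tx.flags = flags;	/* was: new->async_tx.ack = 0; */
		return &new->async_tx;
	}

	static void foo_clean_completed_desc(struct foo_chan *foo_chan,
					     struct foo_desc *desc)
	{
		/* was: if (desc->async_tx.ack) */
		if (async_tx_test_ack(&desc->async_tx))
			list_move_tail(&desc->node, &foo_chan->free_desc);
	}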