 crypto/async_tx/async_memcpy.c          |   2
 crypto/async_tx/async_tx.c              | 202
 crypto/async_tx/async_xor.c             |   2
 drivers/dma/Kconfig                     |   8
 drivers/dma/dmaengine.c                 |  15
 drivers/dma/fsldma.c                    |  57
 drivers/dma/ioat_dma.c                  |  36
 drivers/dma/iop-adma.c                  |  60
 include/asm-arm/arch-iop13xx/adma.h     |   5
 include/asm-arm/hardware/iop3xx-adma.h  |   8
 include/asm-arm/hardware/iop_adma.h     |   2
 include/linux/dmaengine.h               |  36
 12 files changed, 261 insertions(+), 172 deletions(-)
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 84caa4efc0d4..a5eda80e8427 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -77,7 +77,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 		/* if ack is already set then we cannot be sure
 		 * we are referring to the correct operation
 		 */
-		BUG_ON(depend_tx->ack);
+		BUG_ON(async_tx_test_ack(depend_tx));
 		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 			panic("%s: DMA_ERROR waiting for depend_tx\n",
 			      __func__);
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 2be3bae89930..c6e772fc5ccd 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -89,13 +89,19 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 		iter = tx;
 
 		/* find the root of the unsubmitted dependency chain */
-		while (iter->cookie == -EBUSY) {
+		do {
 			parent = iter->parent;
-			if (parent && parent->cookie == -EBUSY)
-				iter = iter->parent;
-			else
+			if (!parent)
 				break;
-		}
+			else
+				iter = parent;
+		} while (parent);
+
+		/* there is a small window for ->parent == NULL and
+		 * ->cookie == -EBUSY
+		 */
+		while (iter->cookie == -EBUSY)
+			cpu_relax();
 
 		status = dma_sync_wait(iter->chan, iter->cookie);
 	} while (status == DMA_IN_PROGRESS || (iter != tx));
@@ -111,24 +117,33 @@ EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
 void
 async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
 {
-	struct dma_async_tx_descriptor *dep_tx, *_dep_tx;
-	struct dma_device *dev;
+	struct dma_async_tx_descriptor *next = tx->next;
 	struct dma_chan *chan;
 
-	list_for_each_entry_safe(dep_tx, _dep_tx, &tx->depend_list,
-		depend_node) {
-		chan = dep_tx->chan;
-		dev = chan->device;
-		/* we can't depend on ourselves */
-		BUG_ON(chan == tx->chan);
-		list_del(&dep_tx->depend_node);
-		tx->tx_submit(dep_tx);
-
-		/* we need to poke the engine as client code does not
-		 * know about dependency submission events
-		 */
-		dev->device_issue_pending(chan);
+	if (!next)
+		return;
+
+	tx->next = NULL;
+	chan = next->chan;
+
+	/* keep submitting up until a channel switch is detected
+	 * in that case we will be called again as a result of
+	 * processing the interrupt from async_tx_channel_switch
+	 */
+	while (next && next->chan == chan) {
+		struct dma_async_tx_descriptor *_next;
+
+		spin_lock_bh(&next->lock);
+		next->parent = NULL;
+		_next = next->next;
+		next->next = NULL;
+		spin_unlock_bh(&next->lock);
+
+		next->tx_submit(next);
+		next = _next;
 	}
+
+	chan->device->device_issue_pending(chan);
 }
 EXPORT_SYMBOL_GPL(async_tx_run_dependencies);
 
@@ -397,6 +412,92 @@ static void __exit async_tx_exit(void)
 }
 #endif
 
+
+/**
+ * async_tx_channel_switch - queue an interrupt descriptor with a dependency
+ *	pre-attached.
+ * @depend_tx: the operation that must finish before the new operation runs
+ * @tx: the new operation
+ */
+static void
+async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
+			struct dma_async_tx_descriptor *tx)
+{
+	struct dma_chan *chan;
+	struct dma_device *device;
+	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
+
+	/* first check to see if we can still append to depend_tx */
+	spin_lock_bh(&depend_tx->lock);
+	if (depend_tx->parent && depend_tx->chan == tx->chan) {
+		tx->parent = depend_tx;
+		depend_tx->next = tx;
+		intr_tx = NULL;
+	}
+	spin_unlock_bh(&depend_tx->lock);
+
+	if (!intr_tx)
+		return;
+
+	chan = depend_tx->chan;
+	device = chan->device;
+
+	/* see if we can schedule an interrupt
+	 * otherwise poll for completion
+	 */
+	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
+		intr_tx = device->device_prep_dma_interrupt(chan, 0);
+	else
+		intr_tx = NULL;
+
+	if (intr_tx) {
+		intr_tx->callback = NULL;
+		intr_tx->callback_param = NULL;
+		tx->parent = intr_tx;
+		/* safe to set ->next outside the lock since we know we are
+		 * not submitted yet
+		 */
+		intr_tx->next = tx;
+
+		/* check if we need to append */
+		spin_lock_bh(&depend_tx->lock);
+		if (depend_tx->parent) {
+			intr_tx->parent = depend_tx;
+			depend_tx->next = intr_tx;
+			async_tx_ack(intr_tx);
+			intr_tx = NULL;
+		}
+		spin_unlock_bh(&depend_tx->lock);
+
+		if (intr_tx) {
+			intr_tx->parent = NULL;
+			intr_tx->tx_submit(intr_tx);
+			async_tx_ack(intr_tx);
+		}
+	} else {
+		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
+			panic("%s: DMA_ERROR waiting for depend_tx\n",
+			      __func__);
+		tx->tx_submit(tx);
+	}
+}
+
+
+/**
+ * submit_disposition - while holding depend_tx->lock we must avoid submitting
+ *	new operations to prevent a circular locking dependency with
+ *	drivers that already hold a channel lock when calling
+ *	async_tx_run_dependencies.
+ * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
+ * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
+ * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
+ */
+enum submit_disposition {
+	ASYNC_TX_SUBMITTED,
+	ASYNC_TX_CHANNEL_SWITCH,
+	ASYNC_TX_DIRECT_SUBMIT,
+};
+
 void
 async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
@@ -405,28 +506,55 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 	tx->callback = cb_fn;
 	tx->callback_param = cb_param;
 
-	/* set this new tx to run after depend_tx if:
-	 * 1/ a dependency exists (depend_tx is !NULL)
-	 * 2/ the tx can not be submitted to the current channel
-	 */
-	if (depend_tx && depend_tx->chan != chan) {
-		/* if ack is already set then we cannot be sure
+	if (depend_tx) {
+		enum submit_disposition s;
+
+		/* sanity check the dependency chain:
+		 * 1/ if ack is already set then we cannot be sure
 		 * we are referring to the correct operation
+		 * 2/ dependencies are 1:1 i.e. two transactions can
+		 * not depend on the same parent
 		 */
-		BUG_ON(depend_tx->ack);
+		BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next ||
+		       tx->parent);
 
-		tx->parent = depend_tx;
+		/* the lock prevents async_tx_run_dependencies from missing
+		 * the setting of ->next when ->parent != NULL
+		 */
 		spin_lock_bh(&depend_tx->lock);
-		list_add_tail(&tx->depend_node, &depend_tx->depend_list);
-		if (depend_tx->cookie == 0) {
-			struct dma_chan *dep_chan = depend_tx->chan;
-			struct dma_device *dep_dev = dep_chan->device;
-			dep_dev->device_dependency_added(dep_chan);
+		if (depend_tx->parent) {
+			/* we have a parent so we can not submit directly
+			 * if we are staying on the same channel: append
+			 * else: channel switch
+			 */
+			if (depend_tx->chan == chan) {
+				tx->parent = depend_tx;
+				depend_tx->next = tx;
+				s = ASYNC_TX_SUBMITTED;
+			} else
+				s = ASYNC_TX_CHANNEL_SWITCH;
+		} else {
+			/* we do not have a parent so we may be able to submit
+			 * directly if we are staying on the same channel
+			 */
+			if (depend_tx->chan == chan)
+				s = ASYNC_TX_DIRECT_SUBMIT;
+			else
+				s = ASYNC_TX_CHANNEL_SWITCH;
 		}
 		spin_unlock_bh(&depend_tx->lock);
 
-		/* schedule an interrupt to trigger the channel switch */
-		async_trigger_callback(ASYNC_TX_ACK, depend_tx, NULL, NULL);
+		switch (s) {
+		case ASYNC_TX_SUBMITTED:
+			break;
+		case ASYNC_TX_CHANNEL_SWITCH:
+			async_tx_channel_switch(depend_tx, tx);
+			break;
+		case ASYNC_TX_DIRECT_SUBMIT:
+			tx->parent = NULL;
+			tx->tx_submit(tx);
+			break;
+		}
 	} else {
 		tx->parent = NULL;
 		tx->tx_submit(tx);
@@ -467,7 +595,7 @@ async_trigger_callback(enum async_tx_flags flags,
 		if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
 			device = NULL;
 
-		tx = device ? device->device_prep_dma_interrupt(chan) : NULL;
+		tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
 	} else
 		tx = NULL;
 
@@ -483,7 +611,7 @@ async_trigger_callback(enum async_tx_flags flags,
 		/* if ack is already set then we cannot be sure
 		 * we are referring to the correct operation
 		 */
-		BUG_ON(depend_tx->ack);
+		BUG_ON(async_tx_test_ack(depend_tx));
 		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 			panic("%s: DMA_ERROR waiting for depend_tx\n",
 			      __func__);
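The async_tx.c changes above replace the per-descriptor depend_list with a singly linked ->parent/->next chain and route cross-channel dependencies through the new async_tx_channel_switch(). A minimal client-side sketch of the chain being managed, using the async_memcpy() and async_trigger_callback() signatures visible in this diff (the pages, callback, and context pointer are hypothetical):

	struct dma_async_tx_descriptor *tx;

	/* first copy: no dependency, may land on any capable channel */
	tx = async_memcpy(page_b, page_a, 0, 0, PAGE_SIZE, 0,
			  NULL, NULL, NULL);

	/* second copy runs only after the first completes; async_tx_submit()
	 * now either appends it via ->next (same channel) or defers it to
	 * async_tx_channel_switch() (different channel)
	 */
	tx = async_memcpy(page_c, page_b, 0, 0, PAGE_SIZE, 0,
			  tx, NULL, NULL);

	/* run a callback once the whole chain is done; ASYNC_TX_ACK lets the
	 * driver reclaim the descriptor as soon as it completes
	 */
	async_trigger_callback(ASYNC_TX_ACK, tx, chain_done_cb, ctx);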
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 1c445c7bdab7..3a0dddca5a10 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -191,7 +191,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 			/* if ack is already set then we cannot be sure
 			 * we are referring to the correct operation
 			 */
-			BUG_ON(depend_tx->ack);
+			BUG_ON(async_tx_test_ack(depend_tx));
 			if (dma_wait_for_async_tx(depend_tx) ==
 				DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for "
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 27340a7b19dd..6239c3df30ac 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -46,14 +46,6 @@ config FSL_DMA
 	  MPC8560/40, MPC8555, MPC8548 and MPC8641 processors.
 	  The MPC8349, MPC8360 is also supported.
 
-config FSL_DMA_SELFTEST
-	bool "Enable the self test for each DMA channel"
-	depends on FSL_DMA
-	default y
-	---help---
-	  Enable the self test for each DMA channel. A self test will be
-	  performed after the channel probed to ensure the DMA works well.
-
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8db0e7f9d3f4..d6dc70fd7527 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -362,7 +362,6 @@ int dma_async_device_register(struct dma_device *device)
 
 	BUG_ON(!device->device_alloc_chan_resources);
 	BUG_ON(!device->device_free_chan_resources);
-	BUG_ON(!device->device_dependency_added);
 	BUG_ON(!device->device_is_tx_complete);
 	BUG_ON(!device->device_issue_pending);
 	BUG_ON(!device->dev);
@@ -479,7 +478,8 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 
 	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
+					 DMA_CTRL_ACK);
 
 	if (!tx) {
 		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -487,7 +487,6 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 		return -ENOMEM;
 	}
 
-	tx->ack = 1;
 	tx->callback = NULL;
 	cookie = tx->tx_submit(tx);
 
@@ -525,7 +524,8 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 
 	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
+					 DMA_CTRL_ACK);
 
 	if (!tx) {
 		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -533,7 +533,6 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 		return -ENOMEM;
 	}
 
-	tx->ack = 1;
 	tx->callback = NULL;
 	cookie = tx->tx_submit(tx);
 
@@ -574,7 +573,8 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
 				DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
+					 DMA_CTRL_ACK);
 
 	if (!tx) {
 		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -582,7 +582,6 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 		return -ENOMEM;
 	}
 
-	tx->ack = 1;
 	tx->callback = NULL;
 	cookie = tx->tx_submit(tx);
 
@@ -600,8 +599,6 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 {
 	tx->chan = chan;
 	spin_lock_init(&tx->lock);
-	INIT_LIST_HEAD(&tx->depend_node);
-	INIT_LIST_HEAD(&tx->depend_list);
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
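The dmaengine.c hunks above illustrate the conversion pattern applied throughout this patch: instead of preparing a descriptor with empty flags and then setting ->ack by hand, a self-contained operation now requests an acknowledged descriptor at prep time. Schematically (condensed directly from the hunks above):

	/* before: a window exists between prep and tx->ack = 1 in which the
	 * descriptor is technically still open for dependent operations
	 */
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
	tx->ack = 1;

	/* after: DMA_CTRL_ACK is set atomically with preparation */
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);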
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index df163687c91a..054eabffc185 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -412,7 +412,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *chan)
 }
 
 static struct dma_async_tx_descriptor *
-fsl_dma_prep_interrupt(struct dma_chan *chan)
+fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
 {
 	struct fsl_dma_chan *fsl_chan;
 	struct fsl_desc_sw *new;
@@ -429,7 +429,7 @@ fsl_dma_prep_interrupt(struct dma_chan *chan)
 	}
 
 	new->async_tx.cookie = -EBUSY;
-	new->async_tx.ack = 0;
+	new->async_tx.flags = flags;
 
 	/* Insert the link descriptor to the LD ring */
 	list_add_tail(&new->node, &new->async_tx.tx_list);
@@ -482,7 +482,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 		set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);
 
 		new->async_tx.cookie = 0;
-		new->async_tx.ack = 1;
+		async_tx_ack(&new->async_tx);
 
 		prev = new;
 		len -= copy;
@@ -493,7 +493,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 		list_add_tail(&new->node, &first->async_tx.tx_list);
 	} while (len);
 
-	new->async_tx.ack = 0; /* client is in control of this ack */
+	new->async_tx.flags = flags; /* client is in control of this ack */
 	new->async_tx.cookie = -EBUSY;
 
 	/* Set End-of-link to the last link descriptor of new list*/
@@ -658,13 +658,6 @@ static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
 	fsl_chan_xfer_ld_queue(fsl_chan);
 }
 
-static void fsl_dma_dependency_added(struct dma_chan *chan)
-{
-	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
-
-	fsl_chan_ld_cleanup(fsl_chan);
-}
-
 /**
  * fsl_dma_is_complete - Determine the DMA status
  * @fsl_chan : Freescale DMA channel
@@ -696,6 +689,8 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 {
 	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
 	u32 stat;
+	int update_cookie = 0;
+	int xfer_ld_q = 0;
 
 	stat = get_sr(fsl_chan);
 	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
@@ -720,8 +715,8 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 			 * Now, update the completed cookie, and continue the
 			 * next uncompleted transfer.
 			 */
-			fsl_dma_update_completed_cookie(fsl_chan);
-			fsl_chan_xfer_ld_queue(fsl_chan);
+			update_cookie = 1;
+			xfer_ld_q = 1;
 		}
 		stat &= ~FSL_DMA_SR_PE;
 	}
@@ -734,19 +729,33 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 		dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
 			(void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
 		stat &= ~FSL_DMA_SR_EOSI;
-		fsl_dma_update_completed_cookie(fsl_chan);
+		update_cookie = 1;
+	}
+
+	/* For MPC8349, EOCDI event need to update cookie
+	 * and start the next transfer if it exist.
+	 */
+	if (stat & FSL_DMA_SR_EOCDI) {
+		dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
+		stat &= ~FSL_DMA_SR_EOCDI;
+		update_cookie = 1;
+		xfer_ld_q = 1;
 	}
 
 	/* If it current transfer is the end-of-transfer,
 	 * we should clear the Channel Start bit for
 	 * prepare next transfer.
 	 */
-	if (stat & (FSL_DMA_SR_EOLNI | FSL_DMA_SR_EOCDI)) {
+	if (stat & FSL_DMA_SR_EOLNI) {
 		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
 		stat &= ~FSL_DMA_SR_EOLNI;
-		fsl_chan_xfer_ld_queue(fsl_chan);
+		xfer_ld_q = 1;
 	}
 
+	if (update_cookie)
+		fsl_dma_update_completed_cookie(fsl_chan);
+	if (xfer_ld_q)
+		fsl_chan_xfer_ld_queue(fsl_chan);
 	if (stat)
 		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
 			stat);
@@ -776,15 +785,13 @@ static void dma_do_tasklet(unsigned long data)
 	fsl_chan_ld_cleanup(fsl_chan);
 }
 
-#ifdef FSL_DMA_CALLBACKTEST
-static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan)
+static void fsl_dma_callback_test(void *param)
 {
+	struct fsl_dma_chan *fsl_chan = param;
 	if (fsl_chan)
-		dev_info(fsl_chan->dev, "selftest: callback is ok!\n");
+		dev_dbg(fsl_chan->dev, "selftest: callback is ok!\n");
 }
-#endif
 
-#ifdef CONFIG_FSL_DMA_SELFTEST
 static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
 {
 	struct dma_chan *chan;
@@ -867,7 +874,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
 	async_tx_ack(tx3);
 
 	/* Interrupt tx test */
-	tx1 = fsl_dma_prep_interrupt(chan);
+	tx1 = fsl_dma_prep_interrupt(chan, 0);
 	async_tx_ack(tx1);
 	cookie = fsl_dma_tx_submit(tx1);
 
@@ -875,13 +882,11 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
 	cookie = fsl_dma_tx_submit(tx3);
 	cookie = fsl_dma_tx_submit(tx2);
 
-#ifdef FSL_DMA_CALLBACKTEST
 	if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *)
 	    dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) {
 		tx3->callback = fsl_dma_callback_test;
 		tx3->callback_param = fsl_chan;
 	}
-#endif
 	fsl_dma_memcpy_issue_pending(chan);
 	msleep(2);
 
@@ -906,7 +911,6 @@ out:
 	kfree(src);
 	return err;
 }
-#endif
 
 static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
 			const struct of_device_id *match)
@@ -997,11 +1001,9 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
 		}
 	}
 
-#ifdef CONFIG_FSL_DMA_SELFTEST
 	err = fsl_dma_self_test(new_fsl_chan);
 	if (err)
 		goto err;
-#endif
 
 	dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
 		 match->compatible, new_fsl_chan->irq);
@@ -1080,7 +1082,6 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
 	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
-	fdev->common.device_dependency_added = fsl_dma_dependency_added;
 	fdev->common.dev = &dev->dev;
 
 	irq = irq_of_parse_and_map(dev->node, 0);
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 4017d9e7acd2..318e8a22d814 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -212,14 +212,14 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	u32 copy;
 	size_t len;
 	dma_addr_t src, dst;
-	int orig_ack;
+	unsigned long orig_flags;
 	unsigned int desc_count = 0;
 
 	/* src and dest and len are stored in the initial descriptor */
 	len = first->len;
 	src = first->src;
 	dst = first->dst;
-	orig_ack = first->async_tx.ack;
+	orig_flags = first->async_tx.flags;
 	new = first;
 
 	spin_lock_bh(&ioat_chan->desc_lock);
@@ -228,7 +228,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	do {
 		copy = min_t(size_t, len, ioat_chan->xfercap);
 
-		new->async_tx.ack = 1;
+		async_tx_ack(&new->async_tx);
 
 		hw = new->hw;
 		hw->size = copy;
@@ -264,7 +264,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	new->tx_cnt = desc_count;
-	new->async_tx.ack = orig_ack; /* client is in control of this ack */
+	new->async_tx.flags = orig_flags; /* client is in control of this ack */
 
 	/* store the original values for use in later cleanup */
 	if (new != first) {
@@ -304,14 +304,14 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 	u32 copy;
 	size_t len;
 	dma_addr_t src, dst;
-	int orig_ack;
+	unsigned long orig_flags;
 	unsigned int desc_count = 0;
 
 	/* src and dest and len are stored in the initial descriptor */
 	len = first->len;
 	src = first->src;
 	dst = first->dst;
-	orig_ack = first->async_tx.ack;
+	orig_flags = first->async_tx.flags;
 	new = first;
 
 	/*
@@ -321,7 +321,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 	do {
 		copy = min_t(size_t, len, ioat_chan->xfercap);
 
-		new->async_tx.ack = 1;
+		async_tx_ack(&new->async_tx);
 
 		hw = new->hw;
 		hw->size = copy;
@@ -349,7 +349,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	new->tx_cnt = desc_count;
-	new->async_tx.ack = orig_ack; /* client is in control of this ack */
+	new->async_tx.flags = orig_flags; /* client is in control of this ack */
 
 	/* store the original values for use in later cleanup */
 	if (new != first) {
@@ -714,7 +714,7 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 		new->len = len;
 		new->dst = dma_dest;
 		new->src = dma_src;
-		new->async_tx.ack = 0;
+		new->async_tx.flags = flags;
 		return &new->async_tx;
 	} else
 		return NULL;
@@ -742,7 +742,7 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 		new->len = len;
 		new->dst = dma_dest;
 		new->src = dma_src;
-		new->async_tx.ack = 0;
+		new->async_tx.flags = flags;
 		return &new->async_tx;
 	} else
 		return NULL;
@@ -842,7 +842,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 			 * a completed entry, but not the last, so clean
 			 * up if the client is done with the descriptor
 			 */
-			if (desc->async_tx.ack) {
+			if (async_tx_test_ack(&desc->async_tx)) {
 				list_del(&desc->node);
 				list_add_tail(&desc->node,
 					      &ioat_chan->free_desc);
@@ -924,17 +924,6 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 	spin_unlock_bh(&ioat_chan->cleanup_lock);
 }
 
-static void ioat_dma_dependency_added(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-	spin_lock_bh(&ioat_chan->desc_lock);
-	if (ioat_chan->pending == 0) {
-		spin_unlock_bh(&ioat_chan->desc_lock);
-		ioat_dma_memcpy_cleanup(ioat_chan);
-	} else
-		spin_unlock_bh(&ioat_chan->desc_lock);
-}
-
 /**
  * ioat_dma_is_complete - poll the status of a IOAT DMA transaction
  * @chan: IOAT DMA channel handle
@@ -990,7 +979,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
 	desc->hw->size = 0;
 	desc->hw->src_addr = 0;
 	desc->hw->dst_addr = 0;
-	desc->async_tx.ack = 1;
+	async_tx_ack(&desc->async_tx);
 	switch (ioat_chan->device->version) {
 	case IOAT_VER_1_2:
 		desc->hw->next = 0;
@@ -1316,7 +1305,6 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 
 	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
 	device->common.device_is_tx_complete = ioat_dma_is_complete;
-	device->common.device_dependency_added = ioat_dma_dependency_added;
 	switch (device->version) {
 	case IOAT_VER_1_2:
 		device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
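Both ioat submit paths above preserve the client's flags across the descriptor chain they build: intermediate descriptors of a long copy are pre-acked, since nothing may depend on them, and only the last descriptor gets the client's original flags back. A condensed sketch of that pattern (the chain-walking helper named here is hypothetical; the real loops are in ioat1_tx_submit()/ioat2_tx_submit() above):

	unsigned long orig_flags = first->async_tx.flags;
	struct ioat_desc_sw *new = first;

	do {
		/* ... program one hardware descriptor, consuming len ... */
		async_tx_ack(&new->async_tx); /* intermediates: pre-acked */
		new = next_desc(new);         /* hypothetical helper */
	} while (len);

	new->async_tx.flags = orig_flags; /* client is in control of this ack */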
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index f82b0906d466..762b729672e0 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -63,7 +63,6 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 	struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
 {
 	BUG_ON(desc->async_tx.cookie < 0);
-	spin_lock_bh(&desc->async_tx.lock);
 	if (desc->async_tx.cookie > 0) {
 		cookie = desc->async_tx.cookie;
 		desc->async_tx.cookie = 0;
@@ -101,7 +100,6 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 
 	/* run dependent operations */
 	async_tx_run_dependencies(&desc->async_tx);
-	spin_unlock_bh(&desc->async_tx.lock);
 
 	return cookie;
 }
@@ -113,7 +111,7 @@ iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
 	/* the client is allowed to attach dependent operations
 	 * until 'ack' is set
 	 */
-	if (!desc->async_tx.ack)
+	if (!async_tx_test_ack(&desc->async_tx))
 		return 0;
 
 	/* leave the last descriptor in the chain
@@ -150,7 +148,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 			"this_desc: %#x next_desc: %#x ack: %d\n",
 			iter->async_tx.cookie, iter->idx, busy,
 			iter->async_tx.phys, iop_desc_get_next_desc(iter),
-			iter->async_tx.ack);
+			async_tx_test_ack(&iter->async_tx));
 		prefetch(_iter);
 		prefetch(&_iter->async_tx);
 
@@ -257,8 +255,6 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 
 	BUG_ON(!seen_current);
 
-	iop_chan_idle(busy, iop_chan);
-
 	if (cookie > 0) {
 		iop_chan->completed_cookie = cookie;
 		pr_debug("\tcompleted cookie %d\n", cookie);
@@ -275,8 +271,11 @@ iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 
 static void iop_adma_tasklet(unsigned long data)
 {
-	struct iop_adma_chan *chan = (struct iop_adma_chan *) data;
-	__iop_adma_slot_cleanup(chan);
+	struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;
+
+	spin_lock(&iop_chan->lock);
+	__iop_adma_slot_cleanup(iop_chan);
+	spin_unlock(&iop_chan->lock);
 }
 
 static struct iop_adma_desc_slot *
@@ -339,9 +338,7 @@ retry:
 
 			/* pre-ack all but the last descriptor */
 			if (num_slots != slots_per_op)
-				iter->async_tx.ack = 1;
-			else
-				iter->async_tx.ack = 0;
+				async_tx_ack(&iter->async_tx);
 
 			list_add_tail(&iter->chain_node, &chain);
 			alloc_tail = iter;
@@ -514,7 +511,7 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 }
 
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_interrupt(struct dma_chan *chan)
+iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -529,6 +526,7 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan)
 		grp_start = sw_desc->group_head;
 		iop_desc_init_interrupt(grp_start, iop_chan);
 		grp_start->unmap_len = 0;
+		sw_desc->async_tx.flags = flags;
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
@@ -561,6 +559,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
 		sw_desc->unmap_src_cnt = 1;
 		sw_desc->unmap_len = len;
+		sw_desc->async_tx.flags = flags;
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
@@ -593,6 +592,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		sw_desc->unmap_src_cnt = 1;
 		sw_desc->unmap_len = len;
+		sw_desc->async_tx.flags = flags;
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
@@ -626,6 +626,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
+		sw_desc->async_tx.flags = flags;
 		while (src_cnt--)
 			iop_desc_set_xor_src_addr(grp_start, src_cnt,
 						  dma_src[src_cnt]);
@@ -662,6 +663,7 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
 			__func__, grp_start->xor_check_result);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
+		sw_desc->async_tx.flags = flags;
 		while (src_cnt--)
 			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
 						       dma_src[src_cnt]);
@@ -671,12 +673,6 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
-static void iop_adma_dependency_added(struct dma_chan *chan)
-{
-	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
-	tasklet_schedule(&iop_chan->irq_tasklet);
-}
-
 static void iop_adma_free_chan_resources(struct dma_chan *chan)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
@@ -854,11 +850,11 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
 	src_dma = dma_map_single(dma_chan->device->dev, src,
 				 IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
 	tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
-				      IOP_ADMA_TEST_SIZE, 1);
+				      IOP_ADMA_TEST_SIZE,
+				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
-	async_tx_ack(tx);
 	msleep(1);
 
 	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
@@ -954,11 +950,11 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
 					   0, PAGE_SIZE, DMA_TO_DEVICE);
 	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
-				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, 1);
+				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
+				   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
-	async_tx_ack(tx);
 	msleep(8);
 
 	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
@@ -1001,11 +997,11 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 					   DMA_TO_DEVICE);
 	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
 					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
-					&zero_sum_result, 1);
+					&zero_sum_result,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
-	async_tx_ack(tx);
 	msleep(8);
 
 	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
@@ -1025,11 +1021,11 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	/* test memset */
 	dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
 				PAGE_SIZE, DMA_FROM_DEVICE);
-	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, 1);
+	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
+				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
-	async_tx_ack(tx);
 	msleep(8);
 
 	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
@@ -1057,11 +1053,11 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 					   DMA_TO_DEVICE);
 	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
 					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
-					&zero_sum_result, 1);
+					&zero_sum_result,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
-	async_tx_ack(tx);
 	msleep(8);
 
 	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
@@ -1177,7 +1173,6 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
 	dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
 	dma_dev->device_is_tx_complete = iop_adma_is_complete;
 	dma_dev->device_issue_pending = iop_adma_issue_pending;
-	dma_dev->device_dependency_added = iop_adma_dependency_added;
 	dma_dev->dev = &pdev->dev;
 
 	/* set prep routines based on capability */
@@ -1232,9 +1227,6 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
 	}
 
 	spin_lock_init(&iop_chan->lock);
-	init_timer(&iop_chan->cleanup_watchdog);
-	iop_chan->cleanup_watchdog.data = (unsigned long) iop_chan;
-	iop_chan->cleanup_watchdog.function = iop_adma_tasklet;
 	INIT_LIST_HEAD(&iop_chan->chain);
 	INIT_LIST_HEAD(&iop_chan->all_slots);
 	INIT_RCU_HEAD(&iop_chan->common.rcu);
@@ -1298,7 +1290,7 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
 		grp_start = sw_desc->group_head;
 
 		list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
-		sw_desc->async_tx.ack = 1;
+		async_tx_ack(&sw_desc->async_tx);
 		iop_desc_init_memcpy(grp_start, 0);
 		iop_desc_set_byte_count(grp_start, iop_chan, 0);
 		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
@@ -1354,7 +1346,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
 		list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
-		sw_desc->async_tx.ack = 1;
+		async_tx_ack(&sw_desc->async_tx);
 		iop_desc_init_null_xor(grp_start, 2, 0);
 		iop_desc_set_byte_count(grp_start, iop_chan, 0);
 		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
diff --git a/include/asm-arm/arch-iop13xx/adma.h b/include/asm-arm/arch-iop13xx/adma.h
index efd9a5eb1008..90d14ee564f5 100644
--- a/include/asm-arm/arch-iop13xx/adma.h
+++ b/include/asm-arm/arch-iop13xx/adma.h
@@ -454,11 +454,6 @@ static inline void iop_chan_append(struct iop_adma_chan *chan)
 	__raw_writel(adma_accr, ADMA_ACCR(chan));
 }
 
-static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan)
-{
-	do { } while (0);
-}
-
 static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
 {
 	return __raw_readl(ADMA_ACSR(chan));
diff --git a/include/asm-arm/hardware/iop3xx-adma.h b/include/asm-arm/hardware/iop3xx-adma.h
index 5c529e6a5e3b..84d635b0a71a 100644
--- a/include/asm-arm/hardware/iop3xx-adma.h
+++ b/include/asm-arm/hardware/iop3xx-adma.h
@@ -767,20 +767,12 @@ static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
 static inline void iop_chan_append(struct iop_adma_chan *chan)
 {
 	u32 dma_chan_ctrl;
-	/* workaround dropped interrupts on 3xx */
-	mod_timer(&chan->cleanup_watchdog, jiffies + msecs_to_jiffies(3));
 
 	dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
 	dma_chan_ctrl |= 0x2;
 	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
 }
 
-static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan)
-{
-	if (!busy)
-		del_timer(&chan->cleanup_watchdog);
-}
-
 static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
 {
 	return __raw_readl(DMA_CSR(chan));
diff --git a/include/asm-arm/hardware/iop_adma.h b/include/asm-arm/hardware/iop_adma.h
index ca8e71f44346..cb7e3611bcba 100644
--- a/include/asm-arm/hardware/iop_adma.h
+++ b/include/asm-arm/hardware/iop_adma.h
@@ -51,7 +51,6 @@ struct iop_adma_device {
 * @common: common dmaengine channel object members
 * @last_used: place holder for allocation to continue from where it left off
 * @all_slots: complete domain of slots usable by the channel
- * @cleanup_watchdog: workaround missed interrupts on iop3xx
 * @slots_allocated: records the actual size of the descriptor slot pool
 * @irq_tasklet: bottom half where iop_adma_slot_cleanup runs
 */
@@ -65,7 +64,6 @@ struct iop_adma_chan {
 	struct dma_chan common;
 	struct iop_adma_desc_slot *last_used;
 	struct list_head all_slots;
-	struct timer_list cleanup_watchdog;
 	int slots_allocated;
 	struct tasklet_struct irq_tasklet;
 };
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 34d440698293..b4d84ed6187d 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
| @@ -95,12 +95,17 @@ enum dma_transaction_type { | |||
| 95 | #define DMA_TX_TYPE_END (DMA_INTERRUPT + 1) | 95 | #define DMA_TX_TYPE_END (DMA_INTERRUPT + 1) |
| 96 | 96 | ||
| 97 | /** | 97 | /** |
| 98 | * enum dma_prep_flags - DMA flags to augment operation preparation | 98 | * enum dma_ctrl_flags - DMA flags to augment operation preparation, |
| 99 | * control completion, and communicate status. | ||
| 99 | * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of | 100 | * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of |
| 100 | * this transaction | 101 | * this transaction |
| 102 | * @DMA_CTRL_ACK - the descriptor cannot be reused until the client | ||
| 103 | * acknowledges receipt, i.e. has had a chance to establish any | ||
| 104 | * dependency chains | ||
| 101 | */ | 105 | */ |
| 102 | enum dma_prep_flags { | 106 | enum dma_ctrl_flags { |
| 103 | DMA_PREP_INTERRUPT = (1 << 0), | 107 | DMA_PREP_INTERRUPT = (1 << 0), |
| 108 | DMA_CTRL_ACK = (1 << 1), | ||
| 104 | }; | 109 | }; |
| 105 | 110 | ||
| 106 | /** | 111 | /** |
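Taken together, the two flags above define the descriptor lifetime: a descriptor prepared without DMA_CTRL_ACK stays claimed so further operations can still chain off it, and the client sets the bit once the chain is established. A minimal, illustrative client-side sketch, not part of this patch; chan, dma_dest, dma_src, and len are assumed to have been set up elsewhere:

	/* Illustrative only: leave the descriptor un-acked while a
	 * dependent operation may still attach to it.
	 */
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
						  len, DMA_PREP_INTERRUPT);
	if (tx) {
		cookie = tx->tx_submit(tx);
		/* ... chain any dependent transactions here ... */
		async_tx_ack(tx);	/* slot may now be recycled */
	}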
| @@ -211,8 +216,8 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param); | |||
| 211 | * ---dma generic offload fields--- | 216 | * ---dma generic offload fields--- |
| 212 | * @cookie: tracking cookie for this transaction, set to -EBUSY if | 217 | * @cookie: tracking cookie for this transaction, set to -EBUSY if |
| 213 | * this tx is sitting on a dependency list | 218 | * this tx is sitting on a dependency list |
| 214 | * @ack: the descriptor can not be reused until the client acknowledges | 219 | * @flags: flags to augment operation preparation, control completion, and |
| 215 | * receipt, i.e. has has a chance to establish any dependency chains | 220 | * communicate status |
| 216 | * @phys: physical address of the descriptor | 221 | * @phys: physical address of the descriptor |
| 217 | * @tx_list: driver common field for operations that require multiple | 222 | * @tx_list: driver common field for operations that require multiple |
| 218 | * descriptors | 223 | * descriptors |
| @@ -221,23 +226,20 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param); | |||
| 221 | * @callback: routine to call after this operation is complete | 226 | * @callback: routine to call after this operation is complete |
| 222 | * @callback_param: general parameter to pass to the callback routine | 227 | * @callback_param: general parameter to pass to the callback routine |
| 223 | * ---async_tx api specific fields--- | 228 | * ---async_tx api specific fields--- |
| 224 | * @depend_list: at completion this list of transactions is submitted | 229 | * @next: at completion submit this descriptor |
| 225 | * @depend_node: allow this transaction to be executed after another | ||
| 226 | * transaction has completed, possibly on another channel | ||
| 227 | * @parent: pointer to the next level up in the dependency chain | 230 | * @parent: pointer to the next level up in the dependency chain |
| 228 | * @lock: protect the dependency list | 231 | * @lock: protect the parent and next pointers |
| 229 | */ | 232 | */ |
| 230 | struct dma_async_tx_descriptor { | 233 | struct dma_async_tx_descriptor { |
| 231 | dma_cookie_t cookie; | 234 | dma_cookie_t cookie; |
| 232 | int ack; | 235 | enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */ |
| 233 | dma_addr_t phys; | 236 | dma_addr_t phys; |
| 234 | struct list_head tx_list; | 237 | struct list_head tx_list; |
| 235 | struct dma_chan *chan; | 238 | struct dma_chan *chan; |
| 236 | dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); | 239 | dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); |
| 237 | dma_async_tx_callback callback; | 240 | dma_async_tx_callback callback; |
| 238 | void *callback_param; | 241 | void *callback_param; |
| 239 | struct list_head depend_list; | 242 | struct dma_async_tx_descriptor *next; |
| 240 | struct list_head depend_node; | ||
| 241 | struct dma_async_tx_descriptor *parent; | 243 | struct dma_async_tx_descriptor *parent; |
| 242 | spinlock_t lock; | 244 | spinlock_t lock; |
| 243 | }; | 245 | }; |
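The depend_list/depend_node pair gives way to a single ->next pointer, so a descriptor now carries at most one direct dependent. A simplified sketch of how a dependency might be recorded under ->lock; this is illustrative only, and the real submit path must additionally handle the race where the parent completes before the link is made:

	/* Sketch: attach 'child' so the completion path submits it
	 * when 'parent' finishes.
	 */
	spin_lock_bh(&parent->lock);
	child->parent = parent;		/* walk up: who must finish first */
	parent->next = child;		/* walk down: whom to submit next */
	spin_unlock_bh(&parent->lock);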
| @@ -261,7 +263,6 @@ struct dma_async_tx_descriptor { | |||
| 261 | * @device_prep_dma_zero_sum: prepares a zero_sum operation | 263 | * @device_prep_dma_zero_sum: prepares a zero_sum operation |
| 262 | * @device_prep_dma_memset: prepares a memset operation | 264 | * @device_prep_dma_memset: prepares a memset operation |
| 263 | * @device_prep_dma_interrupt: prepares an end of chain interrupt operation | 265 | * @device_prep_dma_interrupt: prepares an end of chain interrupt operation |
| 264 | * @device_dependency_added: async_tx notifies the channel about new deps | ||
| 265 | * @device_issue_pending: push pending transactions to hardware | 266 | * @device_issue_pending: push pending transactions to hardware |
| 266 | */ | 267 | */ |
| 267 | struct dma_device { | 268 | struct dma_device { |
| @@ -294,9 +295,8 @@ struct dma_device { | |||
| 294 | struct dma_chan *chan, dma_addr_t dest, int value, size_t len, | 295 | struct dma_chan *chan, dma_addr_t dest, int value, size_t len, |
| 295 | unsigned long flags); | 296 | unsigned long flags); |
| 296 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( | 297 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( |
| 297 | struct dma_chan *chan); | 298 | struct dma_chan *chan, unsigned long flags); |
| 298 | 299 | ||
| 299 | void (*device_dependency_added)(struct dma_chan *chan); | ||
| 300 | enum dma_status (*device_is_tx_complete)(struct dma_chan *chan, | 300 | enum dma_status (*device_is_tx_complete)(struct dma_chan *chan, |
| 301 | dma_cookie_t cookie, dma_cookie_t *last, | 301 | dma_cookie_t cookie, dma_cookie_t *last, |
| 302 | dma_cookie_t *used); | 302 | dma_cookie_t *used); |
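Passing flags into device_prep_dma_interrupt() lets a driver honor DMA_CTRL_ACK and DMA_PREP_INTERRUPT at preparation time, like every other prep routine. A hypothetical driver-side implementation of the new signature; all foo_ names are invented:

	struct foo_desc {
		struct dma_async_tx_descriptor txd;
		/* ... hardware descriptor fields ... */
	};

	static struct dma_async_tx_descriptor *
	foo_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
	{
		struct foo_desc *desc = foo_alloc_descriptor(chan);

		if (!desc)
			return NULL;

		foo_desc_init_interrupt(desc, chan);
		desc->txd.flags = flags;	/* e.g. DMA_CTRL_ACK */
		return &desc->txd;
	}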
| @@ -321,7 +321,13 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, | |||
| 321 | static inline void | 321 | static inline void |
| 322 | async_tx_ack(struct dma_async_tx_descriptor *tx) | 322 | async_tx_ack(struct dma_async_tx_descriptor *tx) |
| 323 | { | 323 | { |
| 324 | tx->ack = 1; | 324 | tx->flags |= DMA_CTRL_ACK; |
| 325 | } | ||
| 326 | |||
| 327 | static inline int | ||
| 328 | async_tx_test_ack(struct dma_async_tx_descriptor *tx) | ||
| 329 | { | ||
| 330 | return tx->flags & DMA_CTRL_ACK; | ||
| 325 | } | 331 | } |
| 326 | 332 | ||
| 327 | #define first_dma_cap(mask) __first_dma_cap(&(mask)) | 333 | #define first_dma_cap(mask) __first_dma_cap(&(mask)) |
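async_tx_test_ack() is the read-side counterpart of async_tx_ack(): code that previously read the ->ack field directly now tests the DMA_CTRL_ACK bit through the helper. A hypothetical use in a driver's descriptor-recycling path, where foo_free_descriptor is an invented helper:

	/* a slot may only be recycled once the client has acked it */
	if (async_tx_test_ack(tx))
		foo_free_descriptor(chan, tx);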
