author:    Dan Williams <dan.j.williams@intel.com>  2008-02-02 21:49:57 -0500
committer: Dan Williams <dan.j.williams@intel.com>  2008-02-06 12:12:17 -0500
commit:    0036731c88fdb5bf4f04a796a30b5e445fc57f54 (patch)
tree:      66982e4a9fdb92fedadca35c0ccaa0b9a75e9d2e /drivers/dma/dmaengine.c
parent:    d909b347591a23c5a2c324fbccd4c9c966f31c67 (diff)
async_tx: kill tx_set_src and tx_set_dest methods
The tx_set_src and tx_set_dest methods were originally implemented to allow an array of addresses to be passed down from async_xor to the dmaengine driver while minimizing stack overhead. Removing these methods allows drivers to have all transaction parameters available at 'prep' time, saves two function pointers in struct dma_async_tx_descriptor, and reduces the number of indirect branches.

A consequence of moving this data to the 'prep' routine is that multi-source routines like async_xor need temporary storage to convert an array of linear addresses into an array of dma addresses. To keep the stack footprint of the previous implementation, the input array is reused as storage for the dma addresses. This requires that sizeof(dma_addr_t) be less than or equal to sizeof(void *); as a consequence, CONFIG_DMADEVICES now depends on !CONFIG_HIGHMEM64G. It also requires that drivers be able to make descriptor resources available when the 'prep' routine is polled.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Shannon Nelson <shannon.nelson@intel.com>
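For illustration, here is a minimal sketch of the in-place array reuse described above. The helper name (map_srcs_in_place) and the use of dma_map_single are illustrative assumptions only; async_xor itself maps struct page entries, and this code is not part of the patch:

/*
 * Illustrative sketch, not from this patch: a multi-source routine
 * reusing its void * source array as dma_addr_t storage, so that all
 * dma addresses are available at 'prep' time without extra stack.
 */
#include <linux/dma-mapping.h>
#include <linux/kernel.h>

static dma_addr_t *map_srcs_in_place(struct device *dev, void **src_list,
				     int src_cnt, size_t len)
{
	dma_addr_t *dma_src = (dma_addr_t *)src_list;
	int i;

	/* The reuse is only safe if a dma_addr_t fits in a pointer slot;
	 * this is why CONFIG_DMADEVICES now depends on !CONFIG_HIGHMEM64G.
	 */
	BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(void *));

	for (i = 0; i < src_cnt; i++) {
		/* read the kernel pointer before overwriting its slot */
		void *buf = src_list[i];

		dma_src[i] = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	}
	return dma_src;
}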
Diffstat (limited to 'drivers/dma/dmaengine.c'):
 drivers/dma/dmaengine.c | 49 ++++++++++++++++++++++++++++---------------------
 1 file changed, 28 insertions(+), 21 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index bcf52df30339..29965231b912 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -473,20 +473,22 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
 
-	tx = dev->device_prep_dma_memcpy(chan, len, 0);
-	if (!tx)
+	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+	if (!tx) {
+		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
+	}
 
 	tx->ack = 1;
 	tx->callback = NULL;
-	addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	cookie = tx->tx_submit(tx);
 
 	cpu = get_cpu();
@@ -517,20 +519,22 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
 
-	tx = dev->device_prep_dma_memcpy(chan, len, 0);
-	if (!tx)
+	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+	if (!tx) {
+		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
+	}
 
 	tx->ack = 1;
 	tx->callback = NULL;
-	addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	cookie = tx->tx_submit(tx);
 
 	cpu = get_cpu();
@@ -563,20 +567,23 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
 
-	tx = dev->device_prep_dma_memcpy(chan, len, 0);
-	if (!tx)
+	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
+				DMA_FROM_DEVICE);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+	if (!tx) {
+		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
+	}
 
 	tx->ack = 1;
 	tx->callback = NULL;
-	addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	cookie = tx->tx_submit(tx);
 
 	cpu = get_cpu();
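All three hunks converge on the same driver-facing contract: the caller maps first, then hands the dma addresses to the prep hook. Reconstructed from the calls above (the authoritative prototype lives in include/linux/dmaengine.h of this era; the parameter names here are assumptions), the memcpy prep method now has roughly this shape:

	/* sketch of the post-patch prep hook in struct dma_device;
	 * names 'dest', 'src', 'int_en' are inferred, not quoted */
	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, int int_en);

Because prep can now fail after the mappings exist, each helper also gains the matching unmap calls on the -ENOMEM path, as the hunks show.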