author     Dan Williams <dan.j.williams@intel.com>    2007-01-02 13:10:43 -0500
committer  Dan Williams <dan.j.williams@intel.com>    2007-07-13 11:06:11 -0400
commit     7405f74badf46b5d023c5d2b670b4471525f6c91 (patch)
tree       20dd20571637dba1c2b04c7b13ac208c33b5706b /drivers/dma/dmaengine.c
parent     428ed6024fa74a271142f3257966e9b5e1cb37a1 (diff)
dmaengine: refactor dmaengine around dma_async_tx_descriptor
The current dmaengine interface defines multiple routines per operation,
e.g. dma_async_memcpy_buf_to_buf, dma_async_memcpy_buf_to_page, etc. Adding
more operation types (xor, crc, etc.) to this model would result in an
unmanageable number of method permutations.
Are we really going to add a set of hooks for each DMA engine
whizbang feature?
- Jeff Garzik
The descriptor creation process is refactored using the new common
dma_async_tx_descriptor structure. Instead of per-driver
do_<operation>_<dest>_to_<src> methods, drivers integrate
dma_async_tx_descriptor into their private software descriptor and then
define a 'prep' routine per operation. The prep routine allocates a
descriptor and ensures that the tx_set_src, tx_set_dest, tx_submit routines
are valid. Descriptor creation and submission becomes:
struct dma_device *dev;
struct dma_chan *chan;
struct dma_async_tx_descriptor *tx;
tx = dev->device_prep_dma_<operation>(chan, len, int_flag)
tx->tx_set_src(dma_addr_t, tx, index /* for multi-source ops */)
tx->tx_set_dest(dma_addr_t, tx, index)
tx->tx_submit(tx)
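For a concrete single-source copy, the flow looks roughly like the sketch
below. It mirrors the dma_async_memcpy_buf_to_buf helper added by this patch;
the wrapper name is illustrative only, and unmapping, error handling, and the
per-cpu statistics are omitted:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* illustrative wrapper: offload a copy of 'len' bytes from 'src' to 'dest' */
static dma_cookie_t example_offload_memcpy(struct dma_chan *chan,
                                           void *dest, void *src, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t addr;

        /* one 'prep' routine per operation type replaces the old
         * per-source/destination method permutations */
        tx = dev->device_prep_dma_memcpy(chan, len, 0);
        if (!tx)
                return -ENOMEM;

        tx->ack = 1;            /* descriptor can be recycled once complete */
        tx->callback = NULL;    /* poll for completion rather than notify */

        addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
        tx->tx_set_src(addr, tx, 0);
        addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
        tx->tx_set_dest(addr, tx, 0);

        /* cookie is usable with dma_async_is_tx_complete()/dma_sync_wait() */
        return tx->tx_submit(tx);
}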
In addition to the refactoring, dma_async_tx_descriptor also lays the
groundwork for defining cross-channel-operation dependencies and a
callback facility for asynchronous notification of operation completion.
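A minimal sketch of what that callback facility allows, assuming the
descriptor carries a callback function pointer plus an opaque callback_param
(field names follow this patch's dma_async_tx_descriptor; the helper names
below are hypothetical):

#include <linux/completion.h>
#include <linux/dmaengine.h>

static void example_copy_done(void *param)
{
        complete(param);        /* wake whoever is sleeping on the completion */
}

static dma_cookie_t example_submit_with_callback(struct dma_async_tx_descriptor *tx,
                                                 struct completion *done)
{
        tx->callback = example_copy_done;       /* invoked when the operation finishes */
        tx->callback_param = done;
        return tx->tx_submit(tx);
}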
Changelog:
* drop dma mapping methods, suggested by Chris Leech
* fix ioat_dma_dependency_added, also caught by Andrew Morton
* fix dma_sync_wait, change from Andrew Morton
* uninline large functions, change from Andrew Morton
* add tx->callback = NULL to dmaengine calls to interoperate with async_tx
calls
* hookup ioat_tx_submit
* convert channel capabilities to a 'cpumask_t like' bitmap (see the sketch
  after this list)
* removed DMA_TX_ARRAY_INIT, no longer needed
* checkpatch.pl fixes
* make set_src, set_dest, and tx_submit descriptor specific methods
* fixup git-ioat merge
* move group_list and phys to dma_async_tx_descriptor
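The capabilities bitmap mentioned above works much like a cpumask: a driver
sets the operations it supports in dev->cap_mask before registering, and
clients test for them with dma_has_cap() (used by dma_async_device_register
in the diff below). A rough sketch, assuming dma_cap_zero()/dma_cap_set()
helpers analogous to the cpumask API:

#include <linux/dmaengine.h>

static int example_driver_register(struct dma_device *dev)
{
        /* advertise which operations this engine can offload */
        dma_cap_zero(dev->cap_mask);
        dma_cap_set(DMA_MEMCPY, dev->cap_mask);

        return dma_async_device_register(dev);
}

static bool example_chan_can_memcpy(struct dma_chan *chan)
{
        /* clients test capabilities the same way */
        return dma_has_cap(DMA_MEMCPY, chan->device->cap_mask);
}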
Cc: Jeff Garzik <jeff@garzik.org>
Cc: Chris Leech <christopher.leech@intel.com>
Signed-off-by: Shannon Nelson <shannon.nelson@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--  drivers/dma/dmaengine.c | 182
1 file changed, 182 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 828310d8be80..404cc7b6e705 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -59,6 +59,7 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/mm.h>
 #include <linux/device.h>
 #include <linux/dmaengine.h>
 #include <linux/hardirq.h>
@@ -66,6 +67,7 @@
 #include <linux/percpu.h>
 #include <linux/rcupdate.h>
 #include <linux/mutex.h>
+#include <linux/jiffies.h>
 
 static DEFINE_MUTEX(dma_list_mutex);
 static LIST_HEAD(dma_device_list);
@@ -165,6 +167,24 @@ static struct dma_chan *dma_client_chan_alloc(struct dma_client *client)
         return NULL;
 }
 
+enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
+{
+        enum dma_status status;
+        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
+
+        dma_async_issue_pending(chan);
+        do {
+                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
+                        printk(KERN_ERR "dma_sync_wait_timeout!\n");
+                        return DMA_ERROR;
+                }
+        } while (status == DMA_IN_PROGRESS);
+
+        return status;
+}
+EXPORT_SYMBOL(dma_sync_wait);
+
 /**
  * dma_chan_cleanup - release a DMA channel's resources
  * @kref: kernel reference structure that contains the DMA channel device
@@ -322,6 +342,25 @@ int dma_async_device_register(struct dma_device *device)
         if (!device)
                 return -ENODEV;
 
+        /* validate device routines */
+        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
+                !device->device_prep_dma_memcpy);
+        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
+                !device->device_prep_dma_xor);
+        BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
+                !device->device_prep_dma_zero_sum);
+        BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
+                !device->device_prep_dma_memset);
+        BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
+                !device->device_prep_dma_interrupt);
+
+        BUG_ON(!device->device_alloc_chan_resources);
+        BUG_ON(!device->device_free_chan_resources);
+        BUG_ON(!device->device_dependency_added);
+        BUG_ON(!device->device_is_tx_complete);
+        BUG_ON(!device->device_issue_pending);
+        BUG_ON(!device->dev);
+
         init_completion(&device->done);
         kref_init(&device->refcount);
         device->dev_id = id++;
@@ -415,6 +454,149 @@ void dma_async_device_unregister(struct dma_device *device)
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
 
+/**
+ * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
+ * @chan: DMA channel to offload copy to
+ * @dest: destination address (virtual)
+ * @src: source address (virtual)
+ * @len: length
+ *
+ * Both @dest and @src must be mappable to a bus address according to the
+ * DMA mapping API rules for streaming mappings.
+ * Both @dest and @src must stay memory resident (kernel memory or locked
+ * user space pages).
+ */
+dma_cookie_t
+dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
+                        void *src, size_t len)
+{
+        struct dma_device *dev = chan->device;
+        struct dma_async_tx_descriptor *tx;
+        dma_addr_t addr;
+        dma_cookie_t cookie;
+        int cpu;
+
+        tx = dev->device_prep_dma_memcpy(chan, len, 0);
+        if (!tx)
+                return -ENOMEM;
+
+        tx->ack = 1;
+        tx->callback = NULL;
+        addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
+        tx->tx_set_src(addr, tx, 0);
+        addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
+        tx->tx_set_dest(addr, tx, 0);
+        cookie = tx->tx_submit(tx);
+
+        cpu = get_cpu();
+        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
+        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+        put_cpu();
+
+        return cookie;
+}
+EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
+
+/**
+ * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
+ * @chan: DMA channel to offload copy to
+ * @page: destination page
+ * @offset: offset in page to copy to
+ * @kdata: source address (virtual)
+ * @len: length
+ *
+ * Both @page/@offset and @kdata must be mappable to a bus address according
+ * to the DMA mapping API rules for streaming mappings.
+ * Both @page/@offset and @kdata must stay memory resident (kernel memory or
+ * locked user space pages)
+ */
+dma_cookie_t
+dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
+                        unsigned int offset, void *kdata, size_t len)
+{
+        struct dma_device *dev = chan->device;
+        struct dma_async_tx_descriptor *tx;
+        dma_addr_t addr;
+        dma_cookie_t cookie;
+        int cpu;
+
+        tx = dev->device_prep_dma_memcpy(chan, len, 0);
+        if (!tx)
+                return -ENOMEM;
+
+        tx->ack = 1;
+        tx->callback = NULL;
+        addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
+        tx->tx_set_src(addr, tx, 0);
+        addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
+        tx->tx_set_dest(addr, tx, 0);
+        cookie = tx->tx_submit(tx);
+
+        cpu = get_cpu();
+        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
+        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+        put_cpu();
+
+        return cookie;
+}
+EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
+
+/**
+ * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
+ * @chan: DMA channel to offload copy to
+ * @dest_pg: destination page
+ * @dest_off: offset in page to copy to
+ * @src_pg: source page
+ * @src_off: offset in page to copy from
+ * @len: length
+ *
+ * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
+ * address according to the DMA mapping API rules for streaming mappings.
+ * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
+ * (kernel memory or locked user space pages).
+ */
+dma_cookie_t
+dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
+        unsigned int dest_off, struct page *src_pg, unsigned int src_off,
+        size_t len)
+{
+        struct dma_device *dev = chan->device;
+        struct dma_async_tx_descriptor *tx;
+        dma_addr_t addr;
+        dma_cookie_t cookie;
+        int cpu;
+
+        tx = dev->device_prep_dma_memcpy(chan, len, 0);
+        if (!tx)
+                return -ENOMEM;
+
+        tx->ack = 1;
+        tx->callback = NULL;
+        addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
+        tx->tx_set_src(addr, tx, 0);
+        addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
+        tx->tx_set_dest(addr, tx, 0);
+        cookie = tx->tx_submit(tx);
+
+        cpu = get_cpu();
+        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
+        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+        put_cpu();
+
+        return cookie;
+}
+EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
+
+void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
+        struct dma_chan *chan)
+{
+        tx->chan = chan;
+        spin_lock_init(&tx->lock);
+        INIT_LIST_HEAD(&tx->depend_node);
+        INIT_LIST_HEAD(&tx->depend_list);
+}
+EXPORT_SYMBOL(dma_async_tx_descriptor_init);
+
 static int __init dma_bus_init(void)
 {
         mutex_init(&dma_list_mutex);