diff options
author | Dan Williams <dan.j.williams@intel.com> | 2009-01-06 13:38:14 -0500 |
---|---|---|
committer | Dan Williams <dan.j.williams@intel.com> | 2009-01-06 13:38:14 -0500 |
commit | 2ba05622b8b143b0c95968ba59bddfbd6d2f2559 (patch) | |
tree | b7b72d02a993ff2ba731d6608f4ab8ce87482bcb | |
parent | bec085134e446577a983f17f57d642a88d1af53b (diff) |
dmaengine: provide a common 'issue_pending_all' implementation
async_tx and net_dma each have open-coded versions of issue_pending_all,
so provide a common routine in dmaengine.
The implementation needs to walk the global device list, so use
RCU to allow dma_issue_pending_all to run locklessly. Clients protect
themselves from channel-removal events by holding a dmaengine reference.
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r-- | crypto/async_tx/async_tx.c | 12 | ||||
-rw-r--r-- | drivers/dma/dmaengine.c | 27 | ||||
-rw-r--r-- | include/linux/async_tx.h | 2 | ||||
-rw-r--r-- | include/linux/dmaengine.h | 1 | ||||
-rw-r--r-- | net/core/dev.c | 9 |
5 files changed, 27 insertions, 24 deletions
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index b88bb1f608fc..2cdf7a0867b7 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c | |||
@@ -45,18 +45,6 @@ static DEFINE_SPINLOCK(async_tx_lock); | |||
45 | 45 | ||
46 | static LIST_HEAD(async_tx_master_list); | 46 | static LIST_HEAD(async_tx_master_list); |
47 | 47 | ||
48 | /* async_tx_issue_pending_all - start all transactions on all channels */ | ||
49 | void async_tx_issue_pending_all(void) | ||
50 | { | ||
51 | struct dma_chan_ref *ref; | ||
52 | |||
53 | rcu_read_lock(); | ||
54 | list_for_each_entry_rcu(ref, &async_tx_master_list, node) | ||
55 | ref->chan->device->device_issue_pending(ref->chan); | ||
56 | rcu_read_unlock(); | ||
57 | } | ||
58 | EXPORT_SYMBOL_GPL(async_tx_issue_pending_all); | ||
59 | |||
60 | static void | 48 | static void |
61 | free_dma_chan_ref(struct rcu_head *rcu) | 49 | free_dma_chan_ref(struct rcu_head *rcu) |
62 | { | 50 | { |
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 87a8cd4791ed..418eca28d472 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -70,6 +70,7 @@ | |||
70 | #include <linux/rcupdate.h> | 70 | #include <linux/rcupdate.h> |
71 | #include <linux/mutex.h> | 71 | #include <linux/mutex.h> |
72 | #include <linux/jiffies.h> | 72 | #include <linux/jiffies.h> |
73 | #include <linux/rculist.h> | ||
73 | 74 | ||
74 | static DEFINE_MUTEX(dma_list_mutex); | 75 | static DEFINE_MUTEX(dma_list_mutex); |
75 | static LIST_HEAD(dma_device_list); | 76 | static LIST_HEAD(dma_device_list); |
@@ -366,6 +367,26 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) | |||
366 | EXPORT_SYMBOL(dma_find_channel); | 367 | EXPORT_SYMBOL(dma_find_channel); |
367 | 368 | ||
368 | /** | 369 | /** |
370 | * dma_issue_pending_all - flush all pending operations across all channels | ||
371 | */ | ||
372 | void dma_issue_pending_all(void) | ||
373 | { | ||
374 | struct dma_device *device; | ||
375 | struct dma_chan *chan; | ||
376 | |||
377 | WARN_ONCE(dmaengine_ref_count == 0, | ||
378 | "client called %s without a reference", __func__); | ||
379 | |||
380 | rcu_read_lock(); | ||
381 | list_for_each_entry_rcu(device, &dma_device_list, global_node) | ||
382 | list_for_each_entry(chan, &device->channels, device_node) | ||
383 | if (chan->client_count) | ||
384 | device->device_issue_pending(chan); | ||
385 | rcu_read_unlock(); | ||
386 | } | ||
387 | EXPORT_SYMBOL(dma_issue_pending_all); | ||
388 | |||
389 | /** | ||
369 | * nth_chan - returns the nth channel of the given capability | 390 | * nth_chan - returns the nth channel of the given capability |
370 | * @cap: capability to match | 391 | * @cap: capability to match |
371 | * @n: nth channel desired | 392 | * @n: nth channel desired |
@@ -490,7 +511,7 @@ void dma_async_client_register(struct dma_client *client) | |||
490 | err = dma_chan_get(chan); | 511 | err = dma_chan_get(chan); |
491 | if (err == -ENODEV) { | 512 | if (err == -ENODEV) { |
492 | /* module removed before we could use it */ | 513 | /* module removed before we could use it */ |
493 | list_del_init(&device->global_node); | 514 | list_del_rcu(&device->global_node); |
494 | break; | 515 | break; |
495 | } else if (err) | 516 | } else if (err) |
496 | pr_err("dmaengine: failed to get %s: (%d)\n", | 517 | pr_err("dmaengine: failed to get %s: (%d)\n", |
@@ -635,7 +656,7 @@ int dma_async_device_register(struct dma_device *device) | |||
635 | goto err_out; | 656 | goto err_out; |
636 | } | 657 | } |
637 | } | 658 | } |
638 | list_add_tail(&device->global_node, &dma_device_list); | 659 | list_add_tail_rcu(&device->global_node, &dma_device_list); |
639 | dma_channel_rebalance(); | 660 | dma_channel_rebalance(); |
640 | mutex_unlock(&dma_list_mutex); | 661 | mutex_unlock(&dma_list_mutex); |
641 | 662 | ||
@@ -677,7 +698,7 @@ void dma_async_device_unregister(struct dma_device *device) | |||
677 | struct dma_chan *chan; | 698 | struct dma_chan *chan; |
678 | 699 | ||
679 | mutex_lock(&dma_list_mutex); | 700 | mutex_lock(&dma_list_mutex); |
680 | list_del(&device->global_node); | 701 | list_del_rcu(&device->global_node); |
681 | dma_channel_rebalance(); | 702 | dma_channel_rebalance(); |
682 | mutex_unlock(&dma_list_mutex); | 703 | mutex_unlock(&dma_list_mutex); |
683 | 704 | ||
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 1c816775f135..45f6297821bd 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h | |||
@@ -59,7 +59,7 @@ enum async_tx_flags { | |||
59 | }; | 59 | }; |
60 | 60 | ||
61 | #ifdef CONFIG_DMA_ENGINE | 61 | #ifdef CONFIG_DMA_ENGINE |
62 | void async_tx_issue_pending_all(void); | 62 | #define async_tx_issue_pending_all dma_issue_pending_all |
63 | #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL | 63 | #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL |
64 | #include <asm/async_tx.h> | 64 | #include <asm/async_tx.h> |
65 | #else | 65 | #else |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index b466f02e2433..57a43adfc39e 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -471,6 +471,7 @@ int dma_async_device_register(struct dma_device *device); | |||
471 | void dma_async_device_unregister(struct dma_device *device); | 471 | void dma_async_device_unregister(struct dma_device *device); |
472 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx); | 472 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx); |
473 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); | 473 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); |
474 | void dma_issue_pending_all(void); | ||
474 | 475 | ||
475 | /* --- Helper iov-locking functions --- */ | 476 | /* --- Helper iov-locking functions --- */ |
476 | 477 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index 09c66a449da6..e40b0d57f8ff 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2635,14 +2635,7 @@ out: | |||
2635 | * There may not be any more sk_buffs coming right now, so push | 2635 | * There may not be any more sk_buffs coming right now, so push |
2636 | * any pending DMA copies to hardware | 2636 | * any pending DMA copies to hardware |
2637 | */ | 2637 | */ |
2638 | if (!cpus_empty(net_dma.channel_mask)) { | 2638 | dma_issue_pending_all(); |
2639 | int chan_idx; | ||
2640 | for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) { | ||
2641 | struct dma_chan *chan = net_dma.channels[chan_idx]; | ||
2642 | if (chan) | ||
2643 | dma_async_memcpy_issue_pending(chan); | ||
2644 | } | ||
2645 | } | ||
2646 | #endif | 2639 | #endif |
2647 | 2640 | ||
2648 | return; | 2641 | return; |