aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2009-01-06 13:38:17 -0500
committerDan Williams <dan.j.williams@intel.com>2009-01-06 13:38:17 -0500
commit209b84a88fe81341b4d8d465acc4a67cb7c3feb3 (patch)
tree134632ed8c914f0ee497e7a22bc616d84e068119
parent74465b4ff9ac1da503025c0a0042e023bfa6505c (diff)
dmaengine: replace dma_async_client_register with dmaengine_get
Now that clients no longer need to be notified of channel arrival, dma_async_client_register can simply increment the dmaengine_ref_count. Reviewed-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--crypto/async_tx/async_tx.c115
-rw-r--r--drivers/dma/dmaengine.c22
-rw-r--r--include/linux/dmaengine.h4
-rw-r--r--net/core/dev.c3
4 files changed, 11 insertions, 133 deletions
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 2cdf7a0867b7..f21147f3626a 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -28,120 +28,9 @@
28#include <linux/async_tx.h> 28#include <linux/async_tx.h>
29 29
30#ifdef CONFIG_DMA_ENGINE 30#ifdef CONFIG_DMA_ENGINE
31static enum dma_state_client
32dma_channel_add_remove(struct dma_client *client,
33 struct dma_chan *chan, enum dma_state state);
34
35static struct dma_client async_tx_dma = {
36 .event_callback = dma_channel_add_remove,
37 /* .cap_mask == 0 defaults to all channels */
38};
39
40/**
41 * async_tx_lock - protect modification of async_tx_master_list and serialize
42 * rebalance operations
43 */
44static DEFINE_SPINLOCK(async_tx_lock);
45
46static LIST_HEAD(async_tx_master_list);
47
48static void
49free_dma_chan_ref(struct rcu_head *rcu)
50{
51 struct dma_chan_ref *ref;
52 ref = container_of(rcu, struct dma_chan_ref, rcu);
53 kfree(ref);
54}
55
56static void
57init_dma_chan_ref(struct dma_chan_ref *ref, struct dma_chan *chan)
58{
59 INIT_LIST_HEAD(&ref->node);
60 INIT_RCU_HEAD(&ref->rcu);
61 ref->chan = chan;
62 atomic_set(&ref->count, 0);
63}
64
65static enum dma_state_client
66dma_channel_add_remove(struct dma_client *client,
67 struct dma_chan *chan, enum dma_state state)
68{
69 unsigned long found, flags;
70 struct dma_chan_ref *master_ref, *ref;
71 enum dma_state_client ack = DMA_DUP; /* default: take no action */
72
73 switch (state) {
74 case DMA_RESOURCE_AVAILABLE:
75 found = 0;
76 rcu_read_lock();
77 list_for_each_entry_rcu(ref, &async_tx_master_list, node)
78 if (ref->chan == chan) {
79 found = 1;
80 break;
81 }
82 rcu_read_unlock();
83
84 pr_debug("async_tx: dma resource available [%s]\n",
85 found ? "old" : "new");
86
87 if (!found)
88 ack = DMA_ACK;
89 else
90 break;
91
92 /* add the channel to the generic management list */
93 master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL);
94 if (master_ref) {
95 init_dma_chan_ref(master_ref, chan);
96 spin_lock_irqsave(&async_tx_lock, flags);
97 list_add_tail_rcu(&master_ref->node,
98 &async_tx_master_list);
99 spin_unlock_irqrestore(&async_tx_lock,
100 flags);
101 } else {
102 printk(KERN_WARNING "async_tx: unable to create"
103 " new master entry in response to"
104 " a DMA_RESOURCE_ADDED event"
105 " (-ENOMEM)\n");
106 return 0;
107 }
108 break;
109 case DMA_RESOURCE_REMOVED:
110 found = 0;
111 spin_lock_irqsave(&async_tx_lock, flags);
112 list_for_each_entry(ref, &async_tx_master_list, node)
113 if (ref->chan == chan) {
114 list_del_rcu(&ref->node);
115 call_rcu(&ref->rcu, free_dma_chan_ref);
116 found = 1;
117 break;
118 }
119 spin_unlock_irqrestore(&async_tx_lock, flags);
120
121 pr_debug("async_tx: dma resource removed [%s]\n",
122 found ? "ours" : "not ours");
123
124 if (found)
125 ack = DMA_ACK;
126 else
127 break;
128 break;
129 case DMA_RESOURCE_SUSPEND:
130 case DMA_RESOURCE_RESUME:
131 printk(KERN_WARNING "async_tx: does not support dma channel"
132 " suspend/resume\n");
133 break;
134 default:
135 BUG();
136 }
137
138 return ack;
139}
140
141static int __init async_tx_init(void) 31static int __init async_tx_init(void)
142{ 32{
143 dma_async_client_register(&async_tx_dma); 33 dmaengine_get();
144 dma_async_client_chan_request(&async_tx_dma);
145 34
146 printk(KERN_INFO "async_tx: api initialized (async)\n"); 35 printk(KERN_INFO "async_tx: api initialized (async)\n");
147 36
@@ -150,7 +39,7 @@ static int __init async_tx_init(void)
150 39
151static void __exit async_tx_exit(void) 40static void __exit async_tx_exit(void)
152{ 41{
153 dma_async_client_unregister(&async_tx_dma); 42 dmaengine_put();
154} 43}
155 44
156/** 45/**
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 90aca505a1df..3f1849b7f5ef 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -600,10 +600,9 @@ static void dma_clients_notify_available(void)
600} 600}
601 601
602/** 602/**
603 * dma_async_client_register - register a &dma_client 603 * dmaengine_get - register interest in dma_channels
604 * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
605 */ 604 */
606void dma_async_client_register(struct dma_client *client) 605void dmaengine_get(void)
607{ 606{
608 struct dma_device *device, *_d; 607 struct dma_device *device, *_d;
609 struct dma_chan *chan; 608 struct dma_chan *chan;
@@ -634,25 +633,18 @@ void dma_async_client_register(struct dma_client *client)
634 */ 633 */
635 if (dmaengine_ref_count == 1) 634 if (dmaengine_ref_count == 1)
636 dma_channel_rebalance(); 635 dma_channel_rebalance();
637 list_add_tail(&client->global_node, &dma_client_list);
638 mutex_unlock(&dma_list_mutex); 636 mutex_unlock(&dma_list_mutex);
639} 637}
640EXPORT_SYMBOL(dma_async_client_register); 638EXPORT_SYMBOL(dmaengine_get);
641 639
642/** 640/**
643 * dma_async_client_unregister - unregister a client and free the &dma_client 641 * dmaengine_put - let dma drivers be removed when ref_count == 0
644 * @client: &dma_client to free
645 *
646 * Force frees any allocated DMA channels, frees the &dma_client memory
647 */ 642 */
648void dma_async_client_unregister(struct dma_client *client) 643void dmaengine_put(void)
649{ 644{
650 struct dma_device *device; 645 struct dma_device *device;
651 struct dma_chan *chan; 646 struct dma_chan *chan;
652 647
653 if (!client)
654 return;
655
656 mutex_lock(&dma_list_mutex); 648 mutex_lock(&dma_list_mutex);
657 dmaengine_ref_count--; 649 dmaengine_ref_count--;
658 BUG_ON(dmaengine_ref_count < 0); 650 BUG_ON(dmaengine_ref_count < 0);
@@ -663,11 +655,9 @@ void dma_async_client_unregister(struct dma_client *client)
663 list_for_each_entry(chan, &device->channels, device_node) 655 list_for_each_entry(chan, &device->channels, device_node)
664 dma_chan_put(chan); 656 dma_chan_put(chan);
665 } 657 }
666
667 list_del(&client->global_node);
668 mutex_unlock(&dma_list_mutex); 658 mutex_unlock(&dma_list_mutex);
669} 659}
670EXPORT_SYMBOL(dma_async_client_unregister); 660EXPORT_SYMBOL(dmaengine_put);
671 661
672/** 662/**
673 * dma_async_client_chan_request - send all available channels to the 663 * dma_async_client_chan_request - send all available channels to the
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index d63544cf8a1a..37d95db156d3 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -318,8 +318,8 @@ struct dma_device {
318 318
319/* --- public DMA engine API --- */ 319/* --- public DMA engine API --- */
320 320
321void dma_async_client_register(struct dma_client *client); 321void dmaengine_get(void);
322void dma_async_client_unregister(struct dma_client *client); 322void dmaengine_put(void);
323void dma_async_client_chan_request(struct dma_client *client); 323void dma_async_client_chan_request(struct dma_client *client);
324dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, 324dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
325 void *dest, void *src, size_t len); 325 void *dest, void *src, size_t len);
diff --git a/net/core/dev.c b/net/core/dev.c
index bbb07dbe1740..7596fc9403c8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4894,8 +4894,7 @@ static int __init netdev_dma_register(void)
4894 } 4894 }
4895 spin_lock_init(&net_dma.lock); 4895 spin_lock_init(&net_dma.lock);
4896 dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask); 4896 dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
4897 dma_async_client_register(&net_dma.client); 4897 dmaengine_get();
4898 dma_async_client_chan_request(&net_dma.client);
4899 return 0; 4898 return 0;
4900} 4899}
4901 4900