author    Dan Williams <dan.j.williams@intel.com>  2009-01-06 13:38:14 -0500
committer Dan Williams <dan.j.williams@intel.com>  2009-01-06 13:38:14 -0500
commit    6f49a57aa5a0c6d4e4e27c85f7af6c83325a12d1
tree      afba24357d1f4ff69ccb2b39a19542546590a50b
parent    07f2211e4fbce6990722d78c4f04225da9c0e9cf
dmaengine: up-level reference counting to the module level
Simply, if a client wants any dmaengine channel then prevent all dmaengine
modules from being removed.  Once the clients are done re-enable module
removal.

Why?, beyond reducing complication:

1/ Tracking reference counts per-transaction in an efficient manner, as is
   currently done, requires a complicated scheme to avoid cache-line bouncing
   effects.

2/ Per-transaction ref-counting gives the false impression that a dma-driver
   can be gracefully removed ahead of its user (net, md, or dma-slave)

3/ None of the in-tree dma-drivers talk to hot pluggable hardware, but if
   such an engine were built one day we still would not need to notify
   clients of remove events.  The driver can simply return NULL to a
   ->prep() request, something that is much easier for a client to handle.

Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
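The client-side effect of this change is easiest to see from the API. What
follows is a minimal, hypothetical client sketch against the post-patch
interface (the example_* names are invented for illustration):
dma_async_client_register() now pins every dmaengine driver module on the
client's behalf, so DMA_RESOURCE_AVAILABLE is the only event a client still
needs to act on; the DMA_RESOURCE_REMOVED bookkeeping disappears along with
dma_clients_notify_removed().

#include <linux/module.h>
#include <linux/dmaengine.h>

static struct dma_chan *example_chan;

static enum dma_state_client
example_event(struct dma_client *client, struct dma_chan *chan,
              enum dma_state state)
{
        /* channels cannot vanish while we are registered, so there is
         * no DMA_RESOURCE_REMOVED case to handle here
         */
        if (state == DMA_RESOURCE_AVAILABLE && !example_chan) {
                example_chan = chan;
                return DMA_ACK;         /* keep this channel */
        }
        return DMA_DUP;                 /* no opinion on the rest */
}

static struct dma_client example_client = {
        .event_callback = example_event,
};

static int __init example_init(void)
{
        dma_cap_set(DMA_MEMCPY, example_client.cap_mask);
        /* takes a reference on every dmaengine driver module */
        dma_async_client_register(&example_client);
        return 0;
}

static void __exit example_exit(void)
{
        /* drops those references; driver modules become removable again */
        dma_async_client_unregister(&example_client);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");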
-rw-r--r--  crypto/async_tx/async_tx.c      4
-rw-r--r--  drivers/dma/dmaengine.c       205
-rw-r--r--  drivers/dma/dmatest.c           2
-rw-r--r--  drivers/dma/dw_dmac.c           2
-rw-r--r--  drivers/mmc/host/atmel-mci.c    4
-rw-r--r--  include/linux/dmaengine.h      21
-rw-r--r--  include/net/netdma.h            4
-rw-r--r--  net/ipv4/tcp.c                  1

8 files changed, 132 insertions, 111 deletions
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 8cfac182165d..43fe4cbe71e6 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -198,8 +198,6 @@ dma_channel_add_remove(struct dma_client *client,
 		/* add the channel to the generic management list */
 		master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL);
 		if (master_ref) {
-			/* keep a reference until async_tx is unloaded */
-			dma_chan_get(chan);
 			init_dma_chan_ref(master_ref, chan);
 			spin_lock_irqsave(&async_tx_lock, flags);
 			list_add_tail_rcu(&master_ref->node,
@@ -221,8 +219,6 @@ dma_channel_add_remove(struct dma_client *client,
 		spin_lock_irqsave(&async_tx_lock, flags);
 		list_for_each_entry(ref, &async_tx_master_list, node)
 			if (ref->chan == chan) {
-				/* permit backing devices to go away */
-				dma_chan_put(ref->chan);
 				list_del_rcu(&ref->node);
 				call_rcu(&ref->rcu, free_dma_chan_ref);
 				found = 1;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index b9008932a8f3..d4d925912c47 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -74,6 +74,7 @@
 static DEFINE_MUTEX(dma_list_mutex);
 static LIST_HEAD(dma_device_list);
 static LIST_HEAD(dma_client_list);
+static long dmaengine_ref_count;
 
 /* --- sysfs implementation --- */
 
@@ -105,19 +106,8 @@ static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr, char *buf)
 static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct dma_chan *chan = to_dma_chan(dev);
-	int in_use = 0;
-
-	if (unlikely(chan->slow_ref) &&
-	    atomic_read(&chan->refcount.refcount) > 1)
-		in_use = 1;
-	else {
-		if (local_read(&(per_cpu_ptr(chan->local,
-			get_cpu())->refcount)) > 0)
-			in_use = 1;
-		put_cpu();
-	}
 
-	return sprintf(buf, "%d\n", in_use);
+	return sprintf(buf, "%d\n", chan->client_count);
 }
 
 static struct device_attribute dma_attrs[] = {
@@ -155,6 +145,78 @@ __dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
 	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
 }
 
+static struct module *dma_chan_to_owner(struct dma_chan *chan)
+{
+	return chan->device->dev->driver->owner;
+}
+
+/**
+ * balance_ref_count - catch up the channel reference count
+ * @chan - channel to balance ->client_count versus dmaengine_ref_count
+ *
+ * balance_ref_count must be called under dma_list_mutex
+ */
+static void balance_ref_count(struct dma_chan *chan)
+{
+	struct module *owner = dma_chan_to_owner(chan);
+
+	while (chan->client_count < dmaengine_ref_count) {
+		__module_get(owner);
+		chan->client_count++;
+	}
+}
+
+/**
+ * dma_chan_get - try to grab a dma channel's parent driver module
+ * @chan - channel to grab
+ *
+ * Must be called under dma_list_mutex
+ */
+static int dma_chan_get(struct dma_chan *chan)
+{
+	int err = -ENODEV;
+	struct module *owner = dma_chan_to_owner(chan);
+
+	if (chan->client_count) {
+		__module_get(owner);
+		err = 0;
+	} else if (try_module_get(owner))
+		err = 0;
+
+	if (err == 0)
+		chan->client_count++;
+
+	/* allocate upon first client reference */
+	if (chan->client_count == 1 && err == 0) {
+		int desc_cnt = chan->device->device_alloc_chan_resources(chan, NULL);
+
+		if (desc_cnt < 0) {
+			err = desc_cnt;
+			chan->client_count = 0;
+			module_put(owner);
+		} else
+			balance_ref_count(chan);
+	}
+
+	return err;
+}
+
+/**
+ * dma_chan_put - drop a reference to a dma channel's parent driver module
+ * @chan - channel to release
+ *
+ * Must be called under dma_list_mutex
+ */
+static void dma_chan_put(struct dma_chan *chan)
+{
+	if (!chan->client_count)
+		return; /* this channel failed alloc_chan_resources */
+	chan->client_count--;
+	module_put(dma_chan_to_owner(chan));
+	if (chan->client_count == 0)
+		chan->device->device_free_chan_resources(chan);
+}
+
 /**
  * dma_client_chan_alloc - try to allocate channels to a client
  * @client: &dma_client
@@ -165,7 +227,6 @@ static void dma_client_chan_alloc(struct dma_client *client)
 {
 	struct dma_device *device;
 	struct dma_chan *chan;
-	int desc;	/* allocated descriptor count */
 	enum dma_state_client ack;
 
 	/* Find a channel */
@@ -178,23 +239,16 @@ static void dma_client_chan_alloc(struct dma_client *client)
 		list_for_each_entry(chan, &device->channels, device_node) {
 			if (!dma_chan_satisfies_mask(chan, client->cap_mask))
 				continue;
+			if (!chan->client_count)
+				continue;
+			ack = client->event_callback(client, chan,
+						     DMA_RESOURCE_AVAILABLE);
 
-			desc = chan->device->device_alloc_chan_resources(
-					chan, client);
-			if (desc >= 0) {
-				ack = client->event_callback(client,
-						chan,
-						DMA_RESOURCE_AVAILABLE);
-
-				/* we are done once this client rejects
-				 * an available resource
-				 */
-				if (ack == DMA_ACK) {
-					dma_chan_get(chan);
-					chan->client_count++;
-				} else if (ack == DMA_NAK)
-					return;
-			}
+			/* we are done once this client rejects
+			 * an available resource
+			 */
+			if (ack == DMA_NAK)
+				return;
 		}
 	}
 }
@@ -224,7 +278,6 @@ EXPORT_SYMBOL(dma_sync_wait);
 void dma_chan_cleanup(struct kref *kref)
 {
 	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
-	chan->device->device_free_chan_resources(chan);
 	kref_put(&chan->device->refcount, dma_async_device_cleanup);
 }
 EXPORT_SYMBOL(dma_chan_cleanup);
@@ -232,18 +285,12 @@ EXPORT_SYMBOL(dma_chan_cleanup);
 static void dma_chan_free_rcu(struct rcu_head *rcu)
 {
 	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
-	int bias = 0x7FFFFFFF;
-	int i;
-	for_each_possible_cpu(i)
-		bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
-	atomic_sub(bias, &chan->refcount.refcount);
+
 	kref_put(&chan->refcount, dma_chan_cleanup);
 }
 
 static void dma_chan_release(struct dma_chan *chan)
 {
-	atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
-	chan->slow_ref = 1;
 	call_rcu(&chan->rcu, dma_chan_free_rcu);
 }
 
@@ -263,43 +310,36 @@ static void dma_clients_notify_available(void)
 }
 
 /**
- * dma_chans_notify_available - tell the clients that a channel is going away
- * @chan: channel on its way out
- */
-static void dma_clients_notify_removed(struct dma_chan *chan)
-{
-	struct dma_client *client;
-	enum dma_state_client ack;
-
-	mutex_lock(&dma_list_mutex);
-
-	list_for_each_entry(client, &dma_client_list, global_node) {
-		ack = client->event_callback(client, chan,
-				DMA_RESOURCE_REMOVED);
-
-		/* client was holding resources for this channel so
-		 * free it
-		 */
-		if (ack == DMA_ACK) {
-			dma_chan_put(chan);
-			chan->client_count--;
-		}
-	}
-
-	mutex_unlock(&dma_list_mutex);
-}
-
-/**
  * dma_async_client_register - register a &dma_client
  * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
  */
 void dma_async_client_register(struct dma_client *client)
 {
+	struct dma_device *device, *_d;
+	struct dma_chan *chan;
+	int err;
+
 	/* validate client data */
 	BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
 		!client->slave);
 
 	mutex_lock(&dma_list_mutex);
+	dmaengine_ref_count++;
+
+	/* try to grab channels */
+	list_for_each_entry_safe(device, _d, &dma_device_list, global_node)
+		list_for_each_entry(chan, &device->channels, device_node) {
+			err = dma_chan_get(chan);
+			if (err == -ENODEV) {
+				/* module removed before we could use it */
+				list_del_init(&device->global_node);
+				break;
+			} else if (err)
+				pr_err("dmaengine: failed to get %s: (%d)\n",
+				       dev_name(&chan->dev), err);
+		}
+
+
 	list_add_tail(&client->global_node, &dma_client_list);
 	mutex_unlock(&dma_list_mutex);
 }
@@ -315,23 +355,17 @@ void dma_async_client_unregister(struct dma_client *client)
 {
 	struct dma_device *device;
 	struct dma_chan *chan;
-	enum dma_state_client ack;
 
 	if (!client)
 		return;
 
 	mutex_lock(&dma_list_mutex);
-	/* free all channels the client is holding */
+	dmaengine_ref_count--;
+	BUG_ON(dmaengine_ref_count < 0);
+	/* drop channel references */
 	list_for_each_entry(device, &dma_device_list, global_node)
-		list_for_each_entry(chan, &device->channels, device_node) {
-			ack = client->event_callback(client, chan,
-					DMA_RESOURCE_REMOVED);
-
-			if (ack == DMA_ACK) {
-				dma_chan_put(chan);
-				chan->client_count--;
-			}
-		}
+		list_for_each_entry(chan, &device->channels, device_node)
+			dma_chan_put(chan);
 
 	list_del(&client->global_node);
 	mutex_unlock(&dma_list_mutex);
@@ -423,6 +457,21 @@ int dma_async_device_register(struct dma_device *device)
 	}
 
 	mutex_lock(&dma_list_mutex);
+	if (dmaengine_ref_count)
+		list_for_each_entry(chan, &device->channels, device_node) {
+			/* if clients are already waiting for channels we need
+			 * to take references on their behalf
+			 */
+			if (dma_chan_get(chan) == -ENODEV) {
+				/* note we can only get here for the first
+				 * channel as the remaining channels are
+				 * guaranteed to get a reference
+				 */
+				rc = -ENODEV;
+				mutex_unlock(&dma_list_mutex);
+				goto err_out;
+			}
+		}
 	list_add_tail(&device->global_node, &dma_device_list);
 	mutex_unlock(&dma_list_mutex);
 
@@ -456,7 +505,7 @@ static void dma_async_device_cleanup(struct kref *kref)
 }
 
 /**
- * dma_async_device_unregister - unregisters DMA devices
+ * dma_async_device_unregister - unregister a DMA device
  * @device: &dma_device
  */
 void dma_async_device_unregister(struct dma_device *device)
@@ -468,7 +517,9 @@ void dma_async_device_unregister(struct dma_device *device)
 	mutex_unlock(&dma_list_mutex);
 
 	list_for_each_entry(chan, &device->channels, device_node) {
-		dma_clients_notify_removed(chan);
+		WARN_ONCE(chan->client_count,
+			  "%s called while %d clients hold a reference\n",
+			  __func__, chan->client_count);
 		device_unregister(&chan->dev);
 		dma_chan_release(chan);
 	}
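The invariant the new helpers maintain is that every usable channel holds
chan->client_count == dmaengine_ref_count module references. A quick sanity
check can be run entirely outside the kernel; the toy program below is plain
userspace C, not kernel code, and merely mirrors balance_ref_count() for a
device that registers after clients already exist:

#include <assert.h>
#include <stdio.h>

static long dmaengine_ref_count;        /* one per registered client */

struct chan {
        int client_count;
        int module_refs;
};

/* userspace stand-in for balance_ref_count(); module_refs++ models
 * __module_get() on the channel's parent driver module
 */
static void balance_ref_count(struct chan *c)
{
        while (c->client_count < dmaengine_ref_count) {
                c->module_refs++;
                c->client_count++;
        }
}

int main(void)
{
        struct chan late = { 0, 0 };

        dmaengine_ref_count = 3;        /* three clients already registered */
        balance_ref_count(&late);       /* device shows up afterwards */

        assert(late.client_count == dmaengine_ref_count);
        printf("module pinned %d times for %ld clients\n",
               late.module_refs, dmaengine_ref_count);
        return 0;
}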
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index ed9636bfb54a..db4050884713 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -215,7 +215,6 @@ static int dmatest_func(void *data)
 
 	smp_rmb();
 	chan = thread->chan;
-	dma_chan_get(chan);
 
 	while (!kthread_should_stop()) {
 		total_tests++;
@@ -293,7 +292,6 @@ static int dmatest_func(void *data)
 	}
 
 	ret = 0;
-	dma_chan_put(chan);
 	kfree(thread->dstbuf);
 err_dstbuf:
 	kfree(thread->srcbuf);
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 0778d99aea7c..377dafa37a20 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -773,7 +773,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
 	dev_vdbg(&chan->dev, "alloc_chan_resources\n");
 
 	/* Channels doing slave DMA can only handle one client. */
-	if (dwc->dws || client->slave) {
+	if (dwc->dws || (client && client->slave)) {
 		if (chan->client_count)
 			return -EBUSY;
 	}
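This one-line fix is needed because the core now calls
device_alloc_chan_resources(chan, NULL) when taking the first module-level
reference (see dma_chan_get() in dmaengine.c above), making a NULL client a
legal argument. A sketch of the pattern every driver callback must follow,
with foo_alloc_chan_resources() and foo_alloc_descriptors() as hypothetical
names:

static int foo_alloc_chan_resources(struct dma_chan *chan,
                                    struct dma_client *client)
{
        /* client == NULL means "no specific client yet"; only enforce
         * slave exclusivity once a real slave client attaches
         */
        if (client && client->slave && chan->client_count)
                return -EBUSY;

        return foo_alloc_descriptors(chan);     /* hypothetical helper */
}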
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 7a3f2436b011..6c11f4d4c4e9 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -593,10 +593,8 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
 
 	/* If we don't have a channel, we can't do DMA */
 	chan = host->dma.chan;
-	if (chan) {
-		dma_chan_get(chan);
+	if (chan)
 		host->data_chan = chan;
-	}
 
 	if (!chan)
 		return -ENODEV;
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index e4ec7e7b8056..d18d37d1015d 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -165,7 +165,6 @@ struct dma_slave {
  */
 
 struct dma_chan_percpu {
-	local_t refcount;
 	/* stats */
 	unsigned long memcpy_count;
 	unsigned long bytes_transferred;
@@ -205,26 +204,6 @@ struct dma_chan {
 
 void dma_chan_cleanup(struct kref *kref);
 
-static inline void dma_chan_get(struct dma_chan *chan)
-{
-	if (unlikely(chan->slow_ref))
-		kref_get(&chan->refcount);
-	else {
-		local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
-		put_cpu();
-	}
-}
-
-static inline void dma_chan_put(struct dma_chan *chan)
-{
-	if (unlikely(chan->slow_ref))
-		kref_put(&chan->refcount, dma_chan_cleanup);
-	else {
-		local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
-		put_cpu();
-	}
-}
-
 /*
  * typedef dma_event_callback - function pointer to a DMA event callback
  * For each channel added to the system this routine is called for each client.
diff --git a/include/net/netdma.h b/include/net/netdma.h
index f28c6e064e8f..cbe2737f4a61 100644
--- a/include/net/netdma.h
+++ b/include/net/netdma.h
@@ -27,11 +27,11 @@
 static inline struct dma_chan *get_softnet_dma(void)
 {
 	struct dma_chan *chan;
+
 	rcu_read_lock();
 	chan = rcu_dereference(__get_cpu_var(softnet_data).net_dma);
-	if (chan)
-		dma_chan_get(chan);
 	rcu_read_unlock();
+
 	return chan;
 }
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f28acf11fc67..75e0e0a2d8db 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1632,7 +1632,6 @@ skip_copy:
 
 		/* Safe to free early-copied skbs now */
 		__skb_queue_purge(&sk->sk_async_wait_queue);
-		dma_chan_put(tp->ucopy.dma_chan);
 		tp->ucopy.dma_chan = NULL;
 	}
 	if (tp->ucopy.pinned_list) {