author		Dan Williams <dan.j.williams@intel.com>	2009-01-06 13:38:14 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2009-01-06 13:38:14 -0500
commit		6f49a57aa5a0c6d4e4e27c85f7af6c83325a12d1 (patch)
tree		afba24357d1f4ff69ccb2b39a19542546590a50b /drivers/dma/dmaengine.c
parent		07f2211e4fbce6990722d78c4f04225da9c0e9cf (diff)
dmaengine: up-level reference counting to the module level
Simply, if a client wants any dmaengine channel then prevent all dmaengine
modules from being removed.  Once the clients are done re-enable module
removal.

Why?, beyond reducing complication:
1/ Tracking reference counts per-transaction in an efficient manner, as is
   currently done, requires a complicated scheme to avoid cache-line bouncing
   effects.
2/ Per-transaction ref-counting gives the false impression that a dma-driver
   can be gracefully removed ahead of its user (net, md, or dma-slave)
3/ None of the in-tree dma-drivers talk to hot pluggable hardware, but if
   such an engine were built one day we still would not need to notify
   clients of remove events.  The driver can simply return NULL to a
   ->prep() request, something that is much easier for a client to handle.

Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
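Point 3/ above is the client-visible consequence: instead of reacting to a
DMA_RESOURCE_REMOVED event, a client only has to cope with a prep call
returning NULL.  A minimal sketch of that failure path, assuming a
hypothetical client helper (submit_memcpy_or_fallback() and its error
convention are not part of this patch; ->device_prep_dma_memcpy() and
tx_submit() are the existing dmaengine API):

#include <linux/dmaengine.h>

/* hypothetical client helper: use the channel if it can still produce
 * descriptors, otherwise tell the caller to fall back to a CPU copy
 */
static dma_cookie_t submit_memcpy_or_fallback(struct dma_chan *chan,
					      dma_addr_t dest, dma_addr_t src,
					      size_t len)
{
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len,
						  DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;	/* no descriptor: caller does memcpy() instead */

	return tx->tx_submit(tx);
}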
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--	drivers/dma/dmaengine.c	205
1 file changed, 128 insertions(+), 77 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index b9008932a8f3..d4d925912c47 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -74,6 +74,7 @@
 static DEFINE_MUTEX(dma_list_mutex);
 static LIST_HEAD(dma_device_list);
 static LIST_HEAD(dma_client_list);
+static long dmaengine_ref_count;
 
 /* --- sysfs implementation --- */
 
@@ -105,19 +106,8 @@ static ssize_t show_bytes_transferred(struct device *dev, struct device_attribut
 static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct dma_chan *chan = to_dma_chan(dev);
-	int in_use = 0;
-
-	if (unlikely(chan->slow_ref) &&
-	    atomic_read(&chan->refcount.refcount) > 1)
-		in_use = 1;
-	else {
-		if (local_read(&(per_cpu_ptr(chan->local,
-			get_cpu())->refcount)) > 0)
-			in_use = 1;
-		put_cpu();
-	}
 
-	return sprintf(buf, "%d\n", in_use);
+	return sprintf(buf, "%d\n", chan->client_count);
 }
 
 static struct device_attribute dma_attrs[] = {
@@ -155,6 +145,78 @@ __dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
 	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
 }
 
+static struct module *dma_chan_to_owner(struct dma_chan *chan)
+{
+	return chan->device->dev->driver->owner;
+}
+
+/**
+ * balance_ref_count - catch up the channel reference count
+ * @chan - channel to balance ->client_count versus dmaengine_ref_count
+ *
+ * balance_ref_count must be called under dma_list_mutex
+ */
+static void balance_ref_count(struct dma_chan *chan)
+{
+	struct module *owner = dma_chan_to_owner(chan);
+
+	while (chan->client_count < dmaengine_ref_count) {
+		__module_get(owner);
+		chan->client_count++;
+	}
+}
+
+/**
+ * dma_chan_get - try to grab a dma channel's parent driver module
+ * @chan - channel to grab
+ *
+ * Must be called under dma_list_mutex
+ */
+static int dma_chan_get(struct dma_chan *chan)
+{
+	int err = -ENODEV;
+	struct module *owner = dma_chan_to_owner(chan);
+
+	if (chan->client_count) {
+		__module_get(owner);
+		err = 0;
+	} else if (try_module_get(owner))
+		err = 0;
+
+	if (err == 0)
+		chan->client_count++;
+
+	/* allocate upon first client reference */
+	if (chan->client_count == 1 && err == 0) {
+		int desc_cnt = chan->device->device_alloc_chan_resources(chan, NULL);
+
+		if (desc_cnt < 0) {
+			err = desc_cnt;
+			chan->client_count = 0;
+			module_put(owner);
+		} else
+			balance_ref_count(chan);
+	}
+
+	return err;
+}
+
+/**
+ * dma_chan_put - drop a reference to a dma channel's parent driver module
+ * @chan - channel to release
+ *
+ * Must be called under dma_list_mutex
+ */
+static void dma_chan_put(struct dma_chan *chan)
+{
+	if (!chan->client_count)
+		return; /* this channel failed alloc_chan_resources */
+	chan->client_count--;
+	module_put(dma_chan_to_owner(chan));
+	if (chan->client_count == 0)
+		chan->device->device_free_chan_resources(chan);
+}
+
 /**
  * dma_client_chan_alloc - try to allocate channels to a client
  * @client: &dma_client
@@ -165,7 +227,6 @@ static void dma_client_chan_alloc(struct dma_client *client)
 {
 	struct dma_device *device;
 	struct dma_chan *chan;
-	int desc;	/* allocated descriptor count */
 	enum dma_state_client ack;
 
 	/* Find a channel */
@@ -178,23 +239,16 @@ static void dma_client_chan_alloc(struct dma_client *client)
 		list_for_each_entry(chan, &device->channels, device_node) {
 			if (!dma_chan_satisfies_mask(chan, client->cap_mask))
 				continue;
+			if (!chan->client_count)
+				continue;
+			ack = client->event_callback(client, chan,
+						     DMA_RESOURCE_AVAILABLE);
 
-			desc = chan->device->device_alloc_chan_resources(
-					chan, client);
-			if (desc >= 0) {
-				ack = client->event_callback(client,
-						chan,
-						DMA_RESOURCE_AVAILABLE);
-
-				/* we are done once this client rejects
-				 * an available resource
-				 */
-				if (ack == DMA_ACK) {
-					dma_chan_get(chan);
-					chan->client_count++;
-				} else if (ack == DMA_NAK)
-					return;
-			}
+			/* we are done once this client rejects
+			 * an available resource
+			 */
+			if (ack == DMA_NAK)
+				return;
 		}
 	}
 }
@@ -224,7 +278,6 @@ EXPORT_SYMBOL(dma_sync_wait);
 void dma_chan_cleanup(struct kref *kref)
 {
 	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
-	chan->device->device_free_chan_resources(chan);
 	kref_put(&chan->device->refcount, dma_async_device_cleanup);
 }
 EXPORT_SYMBOL(dma_chan_cleanup);
@@ -232,18 +285,12 @@ EXPORT_SYMBOL(dma_chan_cleanup);
 static void dma_chan_free_rcu(struct rcu_head *rcu)
 {
 	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
-	int bias = 0x7FFFFFFF;
-	int i;
-	for_each_possible_cpu(i)
-		bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
-	atomic_sub(bias, &chan->refcount.refcount);
+
 	kref_put(&chan->refcount, dma_chan_cleanup);
 }
 
 static void dma_chan_release(struct dma_chan *chan)
 {
-	atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
-	chan->slow_ref = 1;
 	call_rcu(&chan->rcu, dma_chan_free_rcu);
 }
 
@@ -263,43 +310,36 @@ static void dma_clients_notify_available(void)
 }
 
 /**
- * dma_chans_notify_available - tell the clients that a channel is going away
- * @chan: channel on its way out
- */
-static void dma_clients_notify_removed(struct dma_chan *chan)
-{
-	struct dma_client *client;
-	enum dma_state_client ack;
-
-	mutex_lock(&dma_list_mutex);
-
-	list_for_each_entry(client, &dma_client_list, global_node) {
-		ack = client->event_callback(client, chan,
-				DMA_RESOURCE_REMOVED);
-
-		/* client was holding resources for this channel so
-		 * free it
-		 */
-		if (ack == DMA_ACK) {
-			dma_chan_put(chan);
-			chan->client_count--;
-		}
-	}
-
-	mutex_unlock(&dma_list_mutex);
-}
-
-/**
  * dma_async_client_register - register a &dma_client
  * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
  */
 void dma_async_client_register(struct dma_client *client)
 {
+	struct dma_device *device, *_d;
+	struct dma_chan *chan;
+	int err;
+
 	/* validate client data */
 	BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
 		!client->slave);
 
 	mutex_lock(&dma_list_mutex);
+	dmaengine_ref_count++;
+
+	/* try to grab channels */
+	list_for_each_entry_safe(device, _d, &dma_device_list, global_node)
+		list_for_each_entry(chan, &device->channels, device_node) {
+			err = dma_chan_get(chan);
+			if (err == -ENODEV) {
+				/* module removed before we could use it */
+				list_del_init(&device->global_node);
+				break;
+			} else if (err)
+				pr_err("dmaengine: failed to get %s: (%d)\n",
+				       dev_name(&chan->dev), err);
+		}
+
+
 	list_add_tail(&client->global_node, &dma_client_list);
 	mutex_unlock(&dma_list_mutex);
 }
@@ -315,23 +355,17 @@ void dma_async_client_unregister(struct dma_client *client)
 {
 	struct dma_device *device;
 	struct dma_chan *chan;
-	enum dma_state_client ack;
 
 	if (!client)
 		return;
 
 	mutex_lock(&dma_list_mutex);
-	/* free all channels the client is holding */
+	dmaengine_ref_count--;
+	BUG_ON(dmaengine_ref_count < 0);
+	/* drop channel references */
 	list_for_each_entry(device, &dma_device_list, global_node)
-		list_for_each_entry(chan, &device->channels, device_node) {
-			ack = client->event_callback(client, chan,
-				DMA_RESOURCE_REMOVED);
-
-			if (ack == DMA_ACK) {
-				dma_chan_put(chan);
-				chan->client_count--;
-			}
-		}
+		list_for_each_entry(chan, &device->channels, device_node)
+			dma_chan_put(chan);
 
 	list_del(&client->global_node);
 	mutex_unlock(&dma_list_mutex);
@@ -423,6 +457,21 @@ int dma_async_device_register(struct dma_device *device)
 	}
 
 	mutex_lock(&dma_list_mutex);
+	if (dmaengine_ref_count)
+		list_for_each_entry(chan, &device->channels, device_node) {
+			/* if clients are already waiting for channels we need
+			 * to take references on their behalf
+			 */
+			if (dma_chan_get(chan) == -ENODEV) {
+				/* note we can only get here for the first
+				 * channel as the remaining channels are
+				 * guaranteed to get a reference
+				 */
+				rc = -ENODEV;
+				mutex_unlock(&dma_list_mutex);
+				goto err_out;
+			}
+		}
 	list_add_tail(&device->global_node, &dma_device_list);
 	mutex_unlock(&dma_list_mutex);
 
@@ -456,7 +505,7 @@ static void dma_async_device_cleanup(struct kref *kref)
 }
 
 /**
- * dma_async_device_unregister - unregisters DMA devices
+ * dma_async_device_unregister - unregister a DMA device
  * @device: &dma_device
  */
 void dma_async_device_unregister(struct dma_device *device)
@@ -468,7 +517,9 @@ void dma_async_device_unregister(struct dma_device *device)
 	mutex_unlock(&dma_list_mutex);
 
 	list_for_each_entry(chan, &device->channels, device_node) {
-		dma_clients_notify_removed(chan);
+		WARN_ONCE(chan->client_count,
+			  "%s called while %d clients hold a reference\n",
+			  __func__, chan->client_count);
 		device_unregister(&chan->dev);
 		dma_chan_release(chan);
 	}
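
For context, a hedged usage sketch of the up-leveled reference counting from
the client's point of view (my_client, my_event, my_init and my_exit are
hypothetical; the dma_client API and the DMA_RESOURCE_AVAILABLE handling
follow the code above): registering bumps dmaengine_ref_count and thereby
takes a module reference on every channel's driver, unregistering drops those
references so module removal works again, and no DMA_RESOURCE_REMOVED
handling is needed.

#include <linux/module.h>
#include <linux/dmaengine.h>

/* hypothetical client: only newly available channels are announced now */
static enum dma_state_client my_event(struct dma_client *client,
				      struct dma_chan *chan,
				      enum dma_state state)
{
	if (state == DMA_RESOURCE_AVAILABLE)
		return DMA_ACK;		/* keep using this channel */
	return DMA_DUP;			/* ignore anything else */
}

static struct dma_client my_client = {
	.event_callback	= my_event,
};

static int __init my_init(void)
{
	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
	/* pins every registered dmaengine driver module via dma_chan_get() */
	dma_async_client_register(&my_client);
	return 0;
}

static void __exit my_exit(void)
{
	/* drops the references, re-enabling module removal */
	dma_async_client_unregister(&my_client);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");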