path: root/drivers/dma/dmaengine.c
author	Dan Williams <dan.j.williams@intel.com>	2007-07-09 14:56:42 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2007-07-13 11:06:13 -0400
commit	d379b01e9087a582d58f4b678208a4f8d8376fe7 (patch)
tree	155920bca93c18afba66b9d5acfecd359d5bec65 /drivers/dma/dmaengine.c
parent	7405f74badf46b5d023c5d2b670b4471525f6c91 (diff)
dmaengine: make clients responsible for managing channels
The current implementation assumes that a channel will only be used by one
client at a time.  In order to enable channel sharing the dmaengine core is
changed to a model where clients subscribe to channel-available-events.
Instead of tracking how many channels a client wants and how many it has
received the core just broadcasts the available channels and lets the clients
optionally take a reference.  The core learns about the clients' needs at
dma_event_callback time.

In support of multiple operation types, clients can specify a capability mask
to only be notified of channels that satisfy a certain set of capabilities.

Changelog:
* removed DMA_TX_ARRAY_INIT, no longer needed
* dma_client_chan_free -> dma_chan_release: switch to global reference
  counting only at device unregistration time, before it was also happening
  at client unregistration time
* clients now return dma_state_client to dmaengine (ack, dup, nak)
* checkpatch.pl fixes
* fixup merge with git-ioat

Cc: Chris Leech <christopher.leech@intel.com>
Signed-off-by: Shannon Nelson <shannon.nelson@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--  drivers/dma/dmaengine.c  217
1 file changed, 111 insertions(+), 106 deletions(-)
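
To make the subscription model described above concrete, here is a minimal, hypothetical client written against the interfaces this patch introduces.  It is a sketch under assumptions, not code from the patch: the example_* identifiers are invented; the callback's enum dma_state argument type, the DMA_DUP spelling of the changelog's "dup" response, and the dma_cap_zero()/dma_cap_set() helpers are taken to be the dmaengine.h definitions in this series.

#include <linux/dmaengine.h>

static struct dma_chan *example_chan;	/* hypothetical single-channel client */

static enum dma_state_client
example_event(struct dma_client *client, struct dma_chan *chan,
	      enum dma_state state)	/* event enum type assumed */
{
	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		if (example_chan)
			return DMA_DUP;	/* decline; core keeps offering it to others */
		example_chan = chan;
		return DMA_ACK;		/* core takes a reference on our behalf */
	case DMA_RESOURCE_REMOVED:
		if (chan != example_chan)
			return DMA_DUP;	/* we hold no reference on this channel */
		example_chan = NULL;
		return DMA_ACK;		/* core drops the reference it took for us */
	default:
		return DMA_DUP;
	}
}

static struct dma_client example_client = {
	.event_callback = example_event,
};

static int example_setup(void)
{
	/* only want channels whose device can do memcpy */
	dma_cap_zero(example_client.cap_mask);
	dma_cap_set(DMA_MEMCPY, example_client.cap_mask);

	dma_async_client_register(&example_client);
	/* replay currently available channels through the callback */
	dma_async_client_chan_request(&example_client);
	return 0;
}

On teardown, dma_async_client_unregister() walks every registered channel and issues DMA_RESOURCE_REMOVED to the client, so the same callback also covers unregistration.
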
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 404cc7b6e705..82489923af09 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -37,11 +37,11 @@
  * Each device has a channels list, which runs unlocked but is never modified
  * once the device is registered, it's just setup by the driver.
  *
- * Each client has a channels list, it's only modified under the client->lock
- * and in an RCU callback, so it's safe to read under rcu_read_lock().
+ * Each client is responsible for keeping track of the channels it uses. See
+ * the definition of dma_event_callback in dmaengine.h.
  *
  * Each device has a kref, which is initialized to 1 when the device is
- * registered. A kref_put is done for each class_device registered. When the
+ * registered. A kref_get is done for each class_device registered. When the
  * class_device is released, the coresponding kref_put is done in the release
  * method. Every time one of the device's channels is allocated to a client,
  * a kref_get occurs. When the channel is freed, the coresponding kref_put
@@ -51,10 +51,12 @@
  * references to finish.
  *
  * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
- * with a kref and a per_cpu local_t. A single reference is set when on an
- * ADDED event, and removed with a REMOVE event. Net DMA client takes an
- * extra reference per outstanding transaction. The relase function does a
- * kref_put on the device. -ChrisL
+ * with a kref and a per_cpu local_t. A dma_chan_get is called when a client
+ * signals that it wants to use a channel, and dma_chan_put is called when
+ * a channel is removed or a client using it is unregesitered. A client can
+ * take extra references per outstanding transaction, as is the case with
+ * the NET DMA client. The release function does a kref_put on the device.
+ * -ChrisL, DanW
  */
 
 #include <linux/init.h>
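
For readers unfamiliar with the "bigref" pattern referenced in the comment above, the sketch below illustrates the fast/slow path split.  The real dma_chan_get()/dma_chan_put() helpers live in dmaengine.h and are not part of this hunk, so treat this as an illustrative approximation, not the patch's code.

/* Illustrative approximation of the bigref described above. */
static inline void sketch_chan_get(struct dma_chan *chan)
{
	if (unlikely(chan->slow_ref))
		kref_get(&chan->refcount);	/* slow path: shared kref */
	else {
		/* fast path: bump this CPU's counter, no cache-line bouncing */
		local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
		put_cpu();
	}
}

static inline void sketch_chan_put(struct dma_chan *chan)
{
	if (unlikely(chan->slow_ref))
		kref_put(&chan->refcount, dma_chan_cleanup);
	else {
		local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
		put_cpu();
	}
}

show_in_use() in the next hunk reads the same state: the per-cpu counters while the channel is live, the kref once slow_ref has been set by dma_chan_release().
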
@@ -102,8 +104,19 @@ static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
 static ssize_t show_in_use(struct class_device *cd, char *buf)
 {
 	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
+	int in_use = 0;
+
+	if (unlikely(chan->slow_ref) &&
+		atomic_read(&chan->refcount.refcount) > 1)
+		in_use = 1;
+	else {
+		if (local_read(&(per_cpu_ptr(chan->local,
+			get_cpu())->refcount)) > 0)
+			in_use = 1;
+		put_cpu();
+	}
 
-	return sprintf(buf, "%d\n", (chan->client ? 1 : 0));
+	return sprintf(buf, "%d\n", in_use);
 }
 
 static struct class_device_attribute dma_class_attrs[] = {
@@ -129,42 +142,53 @@ static struct class dma_devclass = {
 
 /* --- client and device registration --- */
 
+#define dma_chan_satisfies_mask(chan, mask) \
+	__dma_chan_satisfies_mask((chan), &(mask))
+static int
+__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
+{
+	dma_cap_mask_t has;
+
+	bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
+		DMA_TX_TYPE_END);
+	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
+}
+
 /**
- * dma_client_chan_alloc - try to allocate a channel to a client
+ * dma_client_chan_alloc - try to allocate channels to a client
  * @client: &dma_client
  *
  * Called with dma_list_mutex held.
  */
-static struct dma_chan *dma_client_chan_alloc(struct dma_client *client)
+static void dma_client_chan_alloc(struct dma_client *client)
 {
 	struct dma_device *device;
 	struct dma_chan *chan;
-	unsigned long flags;
 	int desc;	/* allocated descriptor count */
+	enum dma_state_client ack;
 
-	/* Find a channel, any DMA engine will do */
-	list_for_each_entry(device, &dma_device_list, global_node) {
+	/* Find a channel */
+	list_for_each_entry(device, &dma_device_list, global_node)
 		list_for_each_entry(chan, &device->channels, device_node) {
-			if (chan->client)
+			if (!dma_chan_satisfies_mask(chan, client->cap_mask))
 				continue;
 
 			desc = chan->device->device_alloc_chan_resources(chan);
 			if (desc >= 0) {
-				kref_get(&device->refcount);
-				kref_init(&chan->refcount);
-				chan->slow_ref = 0;
-				INIT_RCU_HEAD(&chan->rcu);
-				chan->client = client;
-				spin_lock_irqsave(&client->lock, flags);
-				list_add_tail_rcu(&chan->client_node,
-						  &client->channels);
-				spin_unlock_irqrestore(&client->lock, flags);
-				return chan;
+				ack = client->event_callback(client,
+						chan,
+						DMA_RESOURCE_AVAILABLE);
+
+				/* we are done once this client rejects
+				 * an available resource
+				 */
+				if (ack == DMA_ACK) {
+					dma_chan_get(chan);
+					kref_get(&device->refcount);
+				} else if (ack == DMA_NAK)
+					return;
 			}
 		}
-	}
-
-	return NULL;
 }
 
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
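
As a hedged illustration of the capability filter just added: a client is only offered a channel whose device advertises every capability in the client's mask.  The helper below is hypothetical (and DMA_XOR is assumed to be defined as a dma_transaction_type in this series); it merely restates the bitmap subset test that __dma_chan_satisfies_mask() performs, using only identifiers visible in the hunk above plus the assumed dma_cap_zero()/dma_cap_set() helpers.

/* Illustrative only: the same subset test dma_chan_satisfies_mask() performs. */
static bool example_device_covers(struct dma_device *device)
{
	dma_cap_mask_t want, has;

	dma_cap_zero(want);
	dma_cap_set(DMA_MEMCPY, want);
	dma_cap_set(DMA_XOR, want);	/* assumed capability */

	bitmap_and(has.bits, want.bits, device->cap_mask.bits, DMA_TX_TYPE_END);

	/* true only if the device offers both DMA_MEMCPY and DMA_XOR */
	return bitmap_equal(want.bits, has.bits, DMA_TX_TYPE_END);
}
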
@@ -193,7 +217,6 @@ void dma_chan_cleanup(struct kref *kref)
 {
 	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
 	chan->device->device_free_chan_resources(chan);
-	chan->client = NULL;
 	kref_put(&chan->device->refcount, dma_async_device_cleanup);
 }
 EXPORT_SYMBOL(dma_chan_cleanup);
@@ -209,7 +232,7 @@ static void dma_chan_free_rcu(struct rcu_head *rcu)
 	kref_put(&chan->refcount, dma_chan_cleanup);
 }
 
-static void dma_client_chan_free(struct dma_chan *chan)
+static void dma_chan_release(struct dma_chan *chan)
 {
 	atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
 	chan->slow_ref = 1;
@@ -217,70 +240,57 @@ static void dma_client_chan_free(struct dma_chan *chan)
 }
 
 /**
- * dma_chans_rebalance - reallocate channels to clients
- *
- * When the number of DMA channel in the system changes,
- * channels need to be rebalanced among clients.
+ * dma_chans_notify_available - broadcast available channels to the clients
  */
-static void dma_chans_rebalance(void)
+static void dma_clients_notify_available(void)
 {
 	struct dma_client *client;
-	struct dma_chan *chan;
-	unsigned long flags;
 
 	mutex_lock(&dma_list_mutex);
 
-	list_for_each_entry(client, &dma_client_list, global_node) {
-		while (client->chans_desired > client->chan_count) {
-			chan = dma_client_chan_alloc(client);
-			if (!chan)
-				break;
-			client->chan_count++;
-			client->event_callback(client,
-					       chan,
-					       DMA_RESOURCE_ADDED);
-		}
-		while (client->chans_desired < client->chan_count) {
-			spin_lock_irqsave(&client->lock, flags);
-			chan = list_entry(client->channels.next,
-					  struct dma_chan,
-					  client_node);
-			list_del_rcu(&chan->client_node);
-			spin_unlock_irqrestore(&client->lock, flags);
-			client->chan_count--;
-			client->event_callback(client,
-					       chan,
-					       DMA_RESOURCE_REMOVED);
-			dma_client_chan_free(chan);
-		}
-	}
+	list_for_each_entry(client, &dma_client_list, global_node)
+		dma_client_chan_alloc(client);
 
 	mutex_unlock(&dma_list_mutex);
 }
 
 /**
- * dma_async_client_register - allocate and register a &dma_client
- * @event_callback: callback for notification of channel addition/removal
+ * dma_chans_notify_available - tell the clients that a channel is going away
+ * @chan: channel on its way out
  */
-struct dma_client *dma_async_client_register(dma_event_callback event_callback)
+static void dma_clients_notify_removed(struct dma_chan *chan)
 {
 	struct dma_client *client;
+	enum dma_state_client ack;
 
-	client = kzalloc(sizeof(*client), GFP_KERNEL);
-	if (!client)
-		return NULL;
+	mutex_lock(&dma_list_mutex);
+
+	list_for_each_entry(client, &dma_client_list, global_node) {
+		ack = client->event_callback(client, chan,
+				DMA_RESOURCE_REMOVED);
+
+		/* client was holding resources for this channel so
+		 * free it
+		 */
+		if (ack == DMA_ACK) {
+			dma_chan_put(chan);
+			kref_put(&chan->device->refcount,
+				dma_async_device_cleanup);
+		}
+	}
 
-	INIT_LIST_HEAD(&client->channels);
-	spin_lock_init(&client->lock);
-	client->chans_desired = 0;
-	client->chan_count = 0;
-	client->event_callback = event_callback;
+	mutex_unlock(&dma_list_mutex);
+}
 
+/**
+ * dma_async_client_register - register a &dma_client
+ * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
+ */
+void dma_async_client_register(struct dma_client *client)
+{
 	mutex_lock(&dma_list_mutex);
 	list_add_tail(&client->global_node, &dma_client_list);
 	mutex_unlock(&dma_list_mutex);
-
-	return client;
 }
 EXPORT_SYMBOL(dma_async_client_register);
 
@@ -292,40 +302,42 @@ EXPORT_SYMBOL(dma_async_client_register);
  */
 void dma_async_client_unregister(struct dma_client *client)
 {
+	struct dma_device *device;
 	struct dma_chan *chan;
+	enum dma_state_client ack;
 
 	if (!client)
 		return;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(chan, &client->channels, client_node)
-		dma_client_chan_free(chan);
-	rcu_read_unlock();
-
 	mutex_lock(&dma_list_mutex);
+	/* free all channels the client is holding */
+	list_for_each_entry(device, &dma_device_list, global_node)
+		list_for_each_entry(chan, &device->channels, device_node) {
+			ack = client->event_callback(client, chan,
+				DMA_RESOURCE_REMOVED);
+
+			if (ack == DMA_ACK) {
+				dma_chan_put(chan);
+				kref_put(&chan->device->refcount,
+					dma_async_device_cleanup);
+			}
+		}
+
 	list_del(&client->global_node);
 	mutex_unlock(&dma_list_mutex);
-
-	kfree(client);
-	dma_chans_rebalance();
 }
 EXPORT_SYMBOL(dma_async_client_unregister);
 
 /**
- * dma_async_client_chan_request - request DMA channels
- * @client: &dma_client
- * @number: count of DMA channels requested
- *
- * Clients call dma_async_client_chan_request() to specify how many
- * DMA channels they need, 0 to free all currently allocated.
- * The resulting allocations/frees are indicated to the client via the
- * event callback.
+ * dma_async_client_chan_request - send all available channels to the
+ * client that satisfy the capability mask
+ * @client - requester
  */
-void dma_async_client_chan_request(struct dma_client *client,
-			unsigned int number)
+void dma_async_client_chan_request(struct dma_client *client)
 {
-	client->chans_desired = number;
-	dma_chans_rebalance();
+	mutex_lock(&dma_list_mutex);
+	dma_client_chan_alloc(client);
+	mutex_unlock(&dma_list_mutex);
 }
 EXPORT_SYMBOL(dma_async_client_chan_request);
 
@@ -386,13 +398,16 @@ int dma_async_device_register(struct dma_device *device)
 		}
 
 		kref_get(&device->refcount);
+		kref_init(&chan->refcount);
+		chan->slow_ref = 0;
+		INIT_RCU_HEAD(&chan->rcu);
 	}
 
 	mutex_lock(&dma_list_mutex);
 	list_add_tail(&device->global_node, &dma_device_list);
 	mutex_unlock(&dma_list_mutex);
 
-	dma_chans_rebalance();
+	dma_clients_notify_available();
 
 	return 0;
 
@@ -428,26 +443,16 @@ static void dma_async_device_cleanup(struct kref *kref)
 void dma_async_device_unregister(struct dma_device *device)
 {
 	struct dma_chan *chan;
-	unsigned long flags;
 
 	mutex_lock(&dma_list_mutex);
 	list_del(&device->global_node);
 	mutex_unlock(&dma_list_mutex);
 
 	list_for_each_entry(chan, &device->channels, device_node) {
-		if (chan->client) {
-			spin_lock_irqsave(&chan->client->lock, flags);
-			list_del(&chan->client_node);
-			chan->client->chan_count--;
-			spin_unlock_irqrestore(&chan->client->lock, flags);
-			chan->client->event_callback(chan->client,
-						     chan,
-						     DMA_RESOURCE_REMOVED);
-			dma_client_chan_free(chan);
-		}
+		dma_clients_notify_removed(chan);
 		class_device_unregister(&chan->class_dev);
+		dma_chan_release(chan);
 	}
-	dma_chans_rebalance();
 
 	kref_put(&device->refcount, dma_async_device_cleanup);
 	wait_for_completion(&device->done);