 drivers/Kconfig           |   2 +
 drivers/Makefile          |   1 +
 drivers/dma/Kconfig       |  13 ++
 drivers/dma/Makefile      |   1 +
 drivers/dma/dmaengine.c   | 408 ++++++++++++++++++++++++++++++++++++++++++
 include/linux/dmaengine.h | 337 +++++++++++++++++++++++++++++++++++
 6 files changed, 762 insertions(+), 0 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index aeb5ab2391e4..8b11cebe65df 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -72,4 +72,6 @@ source "drivers/edac/Kconfig"
 
 source "drivers/rtc/Kconfig"
 
+source "drivers/dma/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 447d8e68887a..3c5170310bd0 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -74,3 +74,4 @@ obj-$(CONFIG_SGI_SN) += sn/
 obj-y += firmware/
 obj-$(CONFIG_CRYPTO) += crypto/
 obj-$(CONFIG_SUPERH) += sh/
+obj-$(CONFIG_DMA_ENGINE) += dma/
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
new file mode 100644
index 000000000000..f9ac4bcf8652
--- /dev/null
+++ b/drivers/dma/Kconfig
@@ -0,0 +1,13 @@
+#
+# DMA engine configuration
+#
+
+menu "DMA Engine support"
+
+config DMA_ENGINE
+	bool "Support for DMA engines"
+	---help---
+	  DMA engines offload copy operations from the CPU to dedicated
+	  hardware, allowing the copies to happen asynchronously.
+
+endmenu
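
With the Kconfig and Makefile hooks above in place, the core is compiled in whenever the new option is selected. A minimal sketch of the resulting .config fragment (option name as defined by this patch):

	CONFIG_DMA_ENGINE=y

Since DMA_ENGINE is a bool rather than a tristate, the subsystem is always built in, never modular.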
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
new file mode 100644
index 000000000000..10b739138c93
--- /dev/null
+++ b/drivers/dma/Makefile
@@ -0,0 +1 @@
+obj-y += dmaengine.o
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
new file mode 100644
index 000000000000..473c47b6f094
--- /dev/null
+++ b/drivers/dma/dmaengine.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/*
+ * This code implements the DMA subsystem. It provides a HW-neutral interface
+ * for other kernel code to use asynchronous memory copy capabilities,
+ * if present, and allows different HW DMA drivers to register as providing
+ * this capability.
+ *
+ * Because we are accelerating what is already a relatively fast operation,
+ * the code goes to great lengths to avoid additional overhead, such as
+ * locking.
+ *
+ * LOCKING:
+ *
+ * The subsystem keeps two global lists, dma_device_list and dma_client_list.
+ * Both of these are protected by a mutex, dma_list_mutex.
+ *
+ * Each device has a channels list, which runs unlocked but is never modified
+ * once the device is registered; it is just set up by the driver.
+ *
+ * Each client has a channels list; it is only modified under the client->lock
+ * and in an RCU callback, so it is safe to read under rcu_read_lock().
+ *
+ * Each device has a kref, which is initialized to 1 when the device is
+ * registered. A kref_get occurs for each class_device registered. When the
+ * class_device is released, the corresponding kref_put is done in the release
+ * method. Every time one of the device's channels is allocated to a client,
+ * a kref_get occurs. When the channel is freed, the corresponding kref_put
+ * happens. The device's release function does a completion, so
+ * unregister_device does a remove event, class_device_unregister, a kref_put
+ * for the first reference, then waits on the completion for all other
+ * references to finish.
+ *
+ * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
+ * with a kref and a per_cpu local_t. A single reference is taken on an
+ * ADDED event, and removed with a REMOVE event. The net DMA client takes an
+ * extra reference per outstanding transaction. The release function does a
+ * kref_put on the device. -ChrisL
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/hardirq.h>
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/mutex.h>
+
+static DEFINE_MUTEX(dma_list_mutex);
+static LIST_HEAD(dma_device_list);
+static LIST_HEAD(dma_client_list);
+
+/* --- sysfs implementation --- */
+
+static ssize_t show_memcpy_count(struct class_device *cd, char *buf)
+{
+	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
+	unsigned long count = 0;
+	int i;
+
+	for_each_cpu(i)
+		count += per_cpu_ptr(chan->local, i)->memcpy_count;
+
+	return sprintf(buf, "%lu\n", count);
+}
+
+static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
+{
+	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
+	unsigned long count = 0;
+	int i;
+
+	for_each_cpu(i)
+		count += per_cpu_ptr(chan->local, i)->bytes_transferred;
+
+	return sprintf(buf, "%lu\n", count);
+}
+
+static ssize_t show_in_use(struct class_device *cd, char *buf)
+{
+	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
+
+	return sprintf(buf, "%d\n", (chan->client ? 1 : 0));
+}
+
+static struct class_device_attribute dma_class_attrs[] = {
+	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
+	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
+	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
+	__ATTR_NULL
+};
+
+static void dma_async_device_cleanup(struct kref *kref);
+
+static void dma_class_dev_release(struct class_device *cd)
+{
+	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
+	kref_put(&chan->device->refcount, dma_async_device_cleanup);
+}
+
+static struct class dma_devclass = {
+	.name = "dma",
+	.class_dev_attrs = dma_class_attrs,
+	.release = dma_class_dev_release,
+};
+
+/* --- client and device registration --- */
+
+/**
+ * dma_client_chan_alloc - try to allocate a channel to a client
+ * @client: &dma_client
+ *
+ * Called with dma_list_mutex held.
+ */
+static struct dma_chan *dma_client_chan_alloc(struct dma_client *client)
+{
+	struct dma_device *device;
+	struct dma_chan *chan;
+	unsigned long flags;
+	int desc;	/* allocated descriptor count */
+
+	/* Find a channel, any DMA engine will do */
+	list_for_each_entry(device, &dma_device_list, global_node) {
+		list_for_each_entry(chan, &device->channels, device_node) {
+			if (chan->client)
+				continue;
+
+			desc = chan->device->device_alloc_chan_resources(chan);
+			if (desc >= 0) {
+				kref_get(&device->refcount);
+				kref_init(&chan->refcount);
+				chan->slow_ref = 0;
+				INIT_RCU_HEAD(&chan->rcu);
+				chan->client = client;
+				spin_lock_irqsave(&client->lock, flags);
+				list_add_tail_rcu(&chan->client_node,
+						  &client->channels);
+				spin_unlock_irqrestore(&client->lock, flags);
+				return chan;
+			}
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * dma_chan_cleanup - release a DMA channel's resources
+ * @kref: kernel reference structure embedded in the DMA channel
+ */
+void dma_chan_cleanup(struct kref *kref)
+{
+	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
+	chan->device->device_free_chan_resources(chan);
+	chan->client = NULL;
+	kref_put(&chan->device->refcount, dma_async_device_cleanup);
+}
+
+static void dma_chan_free_rcu(struct rcu_head *rcu)
+{
+	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
+	int bias = 0x7FFFFFFF;
+	int i;
+	for_each_cpu(i)
+		bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
+	atomic_sub(bias, &chan->refcount.refcount);
+	kref_put(&chan->refcount, dma_chan_cleanup);
+}
+
+static void dma_client_chan_free(struct dma_chan *chan)
+{
+	atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
+	chan->slow_ref = 1;
+	call_rcu(&chan->rcu, dma_chan_free_rcu);
+}
+
+/**
+ * dma_chans_rebalance - reallocate channels to clients
+ *
+ * When the number of DMA channels in the system changes,
+ * channels need to be rebalanced among clients.
+ */
+static void dma_chans_rebalance(void)
+{
+	struct dma_client *client;
+	struct dma_chan *chan;
+	unsigned long flags;
+
+	mutex_lock(&dma_list_mutex);
+
+	list_for_each_entry(client, &dma_client_list, global_node) {
+		while (client->chans_desired > client->chan_count) {
+			chan = dma_client_chan_alloc(client);
+			if (!chan)
+				break;
+			client->chan_count++;
+			client->event_callback(client,
+					       chan,
+					       DMA_RESOURCE_ADDED);
+		}
+		while (client->chans_desired < client->chan_count) {
+			spin_lock_irqsave(&client->lock, flags);
+			chan = list_entry(client->channels.next,
+					  struct dma_chan,
+					  client_node);
+			list_del_rcu(&chan->client_node);
+			spin_unlock_irqrestore(&client->lock, flags);
+			client->chan_count--;
+			client->event_callback(client,
+					       chan,
+					       DMA_RESOURCE_REMOVED);
+			dma_client_chan_free(chan);
+		}
+	}
+
+	mutex_unlock(&dma_list_mutex);
+}
+
+/**
+ * dma_async_client_register - allocate and register a &dma_client
+ * @event_callback: callback for notification of channel addition/removal
+ */
+struct dma_client *dma_async_client_register(dma_event_callback event_callback)
+{
+	struct dma_client *client;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return NULL;
+
+	INIT_LIST_HEAD(&client->channels);
+	spin_lock_init(&client->lock);
+	client->chans_desired = 0;
+	client->chan_count = 0;
+	client->event_callback = event_callback;
+
+	mutex_lock(&dma_list_mutex);
+	list_add_tail(&client->global_node, &dma_client_list);
+	mutex_unlock(&dma_list_mutex);
+
+	return client;
+}
+
+/**
+ * dma_async_client_unregister - unregister a client and free the &dma_client
+ * @client: &dma_client to free
+ *
+ * Force frees any allocated DMA channels, frees the &dma_client memory
+ */
+void dma_async_client_unregister(struct dma_client *client)
+{
+	struct dma_chan *chan;
+
+	if (!client)
+		return;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(chan, &client->channels, client_node)
+		dma_client_chan_free(chan);
+	rcu_read_unlock();
+
+	mutex_lock(&dma_list_mutex);
+	list_del(&client->global_node);
+	mutex_unlock(&dma_list_mutex);
+
+	kfree(client);
+	dma_chans_rebalance();
+}
+
+/**
+ * dma_async_client_chan_request - request DMA channels
+ * @client: &dma_client
+ * @number: count of DMA channels requested
+ *
+ * Clients call dma_async_client_chan_request() to specify how many
+ * DMA channels they need, or 0 to free all currently allocated channels.
+ * The resulting allocations/frees are indicated to the client via the
+ * event callback.
+ */
+void dma_async_client_chan_request(struct dma_client *client,
+			unsigned int number)
+{
+	client->chans_desired = number;
+	dma_chans_rebalance();
+}
+
+/**
+ * dma_async_device_register - register a &dma_device and its channels
+ * @device: &dma_device
+ */
+int dma_async_device_register(struct dma_device *device)
+{
+	static int id;
+	int chancnt = 0;
+	struct dma_chan *chan;
+
+	if (!device)
+		return -ENODEV;
+
+	init_completion(&device->done);
+	kref_init(&device->refcount);
+	device->dev_id = id++;
+
+	/* represent channels in sysfs. Probably want devs too */
+	list_for_each_entry(chan, &device->channels, device_node) {
+		chan->local = alloc_percpu(typeof(*chan->local));
+		if (chan->local == NULL)
+			continue;
+
+		chan->chan_id = chancnt++;
+		chan->class_dev.class = &dma_devclass;
+		chan->class_dev.dev = NULL;
+		snprintf(chan->class_dev.class_id, BUS_ID_SIZE, "dma%dchan%d",
+			 device->dev_id, chan->chan_id);
+
+		kref_get(&device->refcount);
+		class_device_register(&chan->class_dev);
+	}
+
+	mutex_lock(&dma_list_mutex);
+	list_add_tail(&device->global_node, &dma_device_list);
+	mutex_unlock(&dma_list_mutex);
+
+	dma_chans_rebalance();
+
+	return 0;
+}
+
+/**
+ * dma_async_device_cleanup - release the final device reference
+ * @kref: &dma_device refcount
+ */
+static void dma_async_device_cleanup(struct kref *kref)
+{
+	struct dma_device *device;
+
+	device = container_of(kref, struct dma_device, refcount);
+	complete(&device->done);
+}
+
+/**
+ * dma_async_device_unregister - unregister a &dma_device
+ * @device: &dma_device
+ */
+void dma_async_device_unregister(struct dma_device *device)
+{
+	struct dma_chan *chan;
+	unsigned long flags;
+
+	mutex_lock(&dma_list_mutex);
+	list_del(&device->global_node);
+	mutex_unlock(&dma_list_mutex);
+
+	list_for_each_entry(chan, &device->channels, device_node) {
+		if (chan->client) {
+			spin_lock_irqsave(&chan->client->lock, flags);
+			list_del(&chan->client_node);
+			chan->client->chan_count--;
+			spin_unlock_irqrestore(&chan->client->lock, flags);
+			chan->client->event_callback(chan->client,
+						     chan,
+						     DMA_RESOURCE_REMOVED);
+			dma_client_chan_free(chan);
+		}
+		class_device_unregister(&chan->class_dev);
+	}
+	dma_chans_rebalance();
+
+	kref_put(&device->refcount, dma_async_device_cleanup);
+	wait_for_completion(&device->done);
+}
+
+static int __init dma_bus_init(void)
+{
+	mutex_init(&dma_list_mutex);
+	return class_register(&dma_devclass);
+}
+
+subsys_initcall(dma_bus_init);
+
+EXPORT_SYMBOL(dma_async_client_register);
+EXPORT_SYMBOL(dma_async_client_unregister);
+EXPORT_SYMBOL(dma_async_client_chan_request);
+EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
+EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
+EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
+EXPORT_SYMBOL(dma_async_memcpy_complete);
+EXPORT_SYMBOL(dma_async_memcpy_issue_pending);
+EXPORT_SYMBOL(dma_async_device_register);
+EXPORT_SYMBOL(dma_async_device_unregister);
+EXPORT_SYMBOL(dma_chan_cleanup);
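
To make the intended call flow concrete, here is a minimal, hypothetical client sketch built only from the API this patch exports; the module name, buffer handling, and busy-poll are illustrative assumptions, not part of the patch. A client registers a callback, asks the core for channels, and is handed them via DMA_RESOURCE_ADDED events; copies are then queued, flushed with issue_pending, and polled for completion.

/* example_dma_client.c - hypothetical user of the DMA engine API (sketch) */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dmaengine.h>

static struct dma_client *example_client;

/* Called by the core when a channel is added to or removed from us. */
static void example_event(struct dma_client *client, struct dma_chan *chan,
		enum dma_event event)
{
	void *src, *dst;
	dma_cookie_t cookie;

	if (event != DMA_RESOURCE_ADDED)
		return;

	src = kmalloc(4096, GFP_KERNEL);
	dst = kmalloc(4096, GFP_KERNEL);
	if (!src || !dst)
		goto out;
	memset(src, 0xaa, 4096);

	/* queue an offloaded copy, then flush it to the hardware */
	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, 4096);
	if (!dma_submit_error(cookie)) {
		dma_async_memcpy_issue_pending(chan);
		/* busy-poll for the sketch; real users do useful work here */
		while (dma_async_memcpy_complete(chan, cookie, NULL, NULL)
				== DMA_IN_PROGRESS)
			cpu_relax();
	}
out:
	kfree(dst);
	kfree(src);
}

static int __init example_init(void)
{
	example_client = dma_async_client_register(example_event);
	if (!example_client)
		return -ENOMEM;
	/* ask the core to balance one channel our way */
	dma_async_client_chan_request(example_client, 1);
	return 0;
}

static void __exit example_exit(void)
{
	/* frees our channels and the client itself */
	dma_async_client_unregister(example_client);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");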
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
new file mode 100644
index 000000000000..30781546ac99
--- /dev/null
+++ b/include/linux/dmaengine.h
@@ -0,0 +1,337 @@
+/*
+ * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+#ifndef DMAENGINE_H
+#define DMAENGINE_H
+#include <linux/config.h>
+#ifdef CONFIG_DMA_ENGINE
+
+#include <linux/device.h>
+#include <linux/uio.h>
+#include <linux/kref.h>
+#include <linux/completion.h>
+#include <linux/rcupdate.h>
+
+/**
+ * enum dma_event - resource PNP/power management events
+ * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
+ * @DMA_RESOURCE_RESUME: DMA device returning to full power
+ * @DMA_RESOURCE_ADDED: DMA device added to the system
+ * @DMA_RESOURCE_REMOVED: DMA device removed from the system
+ */
+enum dma_event {
+	DMA_RESOURCE_SUSPEND,
+	DMA_RESOURCE_RESUME,
+	DMA_RESOURCE_ADDED,
+	DMA_RESOURCE_REMOVED,
+};
+
+/**
+ * typedef dma_cookie_t - an opaque DMA transaction identifier
+ *
+ * A dma_cookie_t > 0 identifies a DMA request; a value < 0 is an error code.
+ */
+typedef s32 dma_cookie_t;
+
+#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
+
+/**
+ * enum dma_status - DMA transaction status
+ * @DMA_SUCCESS: transaction completed successfully
+ * @DMA_IN_PROGRESS: transaction not yet processed
+ * @DMA_ERROR: transaction failed
+ */
+enum dma_status {
+	DMA_SUCCESS,
+	DMA_IN_PROGRESS,
+	DMA_ERROR,
+};
+
+/**
+ * struct dma_chan_percpu - the per-CPU part of struct dma_chan
+ * @refcount: local_t used for open-coded "bigref" counting
+ * @memcpy_count: transaction counter
+ * @bytes_transferred: byte counter
+ */
+struct dma_chan_percpu {
+	local_t refcount;
+	/* stats */
+	unsigned long memcpy_count;
+	unsigned long bytes_transferred;
+};
+
+/**
+ * struct dma_chan - devices supply DMA channels, clients use them
+ * @client: ptr to the client user of this chan, will be NULL when unused
+ * @device: ptr to the dma device who supplies this channel, always !NULL
+ * @cookie: last cookie value returned to client
+ * @chan_id: channel ID for sysfs
+ * @class_dev: class device for sysfs
+ * @refcount: kref, used in "bigref" slow-mode
+ * @slow_ref: set when the channel switches to kref-based (slow) refcounting
+ *	for teardown
+ * @rcu: RCU head used to defer the channel free
+ * @client_node: used to add this to the client chan list
+ * @device_node: used to add this to the device chan list
+ * @local: per-cpu pointer to a struct dma_chan_percpu
+ */
+struct dma_chan {
+	struct dma_client *client;
+	struct dma_device *device;
+	dma_cookie_t cookie;
+
+	/* sysfs */
+	int chan_id;
+	struct class_device class_dev;
+
+	struct kref refcount;
+	int slow_ref;
+	struct rcu_head rcu;
+
+	struct list_head client_node;
+	struct list_head device_node;
+	struct dma_chan_percpu *local;
+};
+
+void dma_chan_cleanup(struct kref *kref);
+
+static inline void dma_chan_get(struct dma_chan *chan)
+{
+	if (unlikely(chan->slow_ref))
+		kref_get(&chan->refcount);
+	else {
+		local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
+		put_cpu();
+	}
+}
+
+static inline void dma_chan_put(struct dma_chan *chan)
+{
+	if (unlikely(chan->slow_ref))
+		kref_put(&chan->refcount, dma_chan_cleanup);
+	else {
+		local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
+		put_cpu();
+	}
+}
+
+/*
+ * typedef dma_event_callback - function pointer to a DMA event callback
+ */
+typedef void (*dma_event_callback) (struct dma_client *client,
+		struct dma_chan *chan, enum dma_event event);
+
+/**
+ * struct dma_client - info on the entity making use of DMA services
+ * @event_callback: func ptr to call when something happens
+ * @chan_count: number of chans allocated
+ * @chans_desired: number of chans requested, may be more or fewer than
+ *	chan_count at any given time
+ * @lock: protects access to the channels list
+ * @channels: the list of DMA channels allocated
+ * @global_node: list_head for global dma_client_list
+ */
+struct dma_client {
+	dma_event_callback event_callback;
+	unsigned int chan_count;
+	unsigned int chans_desired;
+
+	spinlock_t lock;
+	struct list_head channels;
+	struct list_head global_node;
+};
+
+/**
+ * struct dma_device - info on the entity supplying DMA services
+ * @chancnt: how many DMA channels are supported
+ * @channels: the list of struct dma_chan
+ * @global_node: list_head for global dma_device_list
+ * @refcount: device reference count
+ * @done: completion used to wait out all references on unregister
+ * @dev_id: unique device ID
+ * Other func ptrs: used to make use of this device's capabilities
+ */
+struct dma_device {
+	unsigned int chancnt;
+	struct list_head channels;
+	struct list_head global_node;
+
+	struct kref refcount;
+	struct completion done;
+
+	int dev_id;
+
+	int (*device_alloc_chan_resources)(struct dma_chan *chan);
+	void (*device_free_chan_resources)(struct dma_chan *chan);
+	dma_cookie_t (*device_memcpy_buf_to_buf)(struct dma_chan *chan,
+			void *dest, void *src, size_t len);
+	dma_cookie_t (*device_memcpy_buf_to_pg)(struct dma_chan *chan,
+			struct page *page, unsigned int offset, void *kdata,
+			size_t len);
+	dma_cookie_t (*device_memcpy_pg_to_pg)(struct dma_chan *chan,
+			struct page *dest_pg, unsigned int dest_off,
+			struct page *src_pg, unsigned int src_off, size_t len);
+	enum dma_status (*device_memcpy_complete)(struct dma_chan *chan,
+			dma_cookie_t cookie, dma_cookie_t *last,
+			dma_cookie_t *used);
+	void (*device_memcpy_issue_pending)(struct dma_chan *chan);
+};
+
+/* --- public DMA engine API --- */
+
+struct dma_client *dma_async_client_register(dma_event_callback event_callback);
+void dma_async_client_unregister(struct dma_client *client);
+void dma_async_client_chan_request(struct dma_client *client,
+		unsigned int number);
+
+/**
+ * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
+ * @chan: DMA channel to offload copy to
+ * @dest: destination address (virtual)
+ * @src: source address (virtual)
+ * @len: length
+ *
+ * Both @dest and @src must be mappable to a bus address according to the
+ * DMA mapping API rules for streaming mappings.
+ * Both @dest and @src must stay memory resident (kernel memory or locked
+ * user space pages).
+ */
+static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
+		void *dest, void *src, size_t len)
+{
+	int cpu = get_cpu();
+	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
+	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+	put_cpu();
+
+	return chan->device->device_memcpy_buf_to_buf(chan, dest, src, len);
+}
+
+/**
+ * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
+ * @chan: DMA channel to offload copy to
+ * @page: destination page
+ * @offset: offset in page to copy to
+ * @kdata: source address (virtual)
+ * @len: length
+ *
+ * Both @page/@offset and @kdata must be mappable to a bus address according
+ * to the DMA mapping API rules for streaming mappings.
+ * Both @page/@offset and @kdata must stay memory resident (kernel memory or
+ * locked user space pages).
+ */
+static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
+		struct page *page, unsigned int offset, void *kdata, size_t len)
+{
+	int cpu = get_cpu();
+	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
+	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+	put_cpu();
+
+	return chan->device->device_memcpy_buf_to_pg(chan, page, offset,
+						     kdata, len);
+}
+
+/**
+ * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
+ * @chan: DMA channel to offload copy to
+ * @dest_pg: destination page
+ * @dest_off: offset in page to copy to
+ * @src_pg: source page
+ * @src_off: offset in page to copy from
+ * @len: length
+ *
+ * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
+ * address according to the DMA mapping API rules for streaming mappings.
+ * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
+ * (kernel memory or locked user space pages).
+ */
+static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
+		struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
+		unsigned int src_off, size_t len)
+{
+	int cpu = get_cpu();
+	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
+	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+	put_cpu();
+
+	return chan->device->device_memcpy_pg_to_pg(chan, dest_pg, dest_off,
+						    src_pg, src_off, len);
+}
+
+/**
+ * dma_async_memcpy_issue_pending - flush pending copies to HW
+ * @chan: target DMA channel
+ *
+ * This allows drivers to push copies to HW in batches,
+ * reducing MMIO writes where possible.
+ */
+static inline void dma_async_memcpy_issue_pending(struct dma_chan *chan)
+{
+	chan->device->device_memcpy_issue_pending(chan);
+}
+
+/**
+ * dma_async_memcpy_complete - poll for transaction completion
+ * @chan: DMA channel
+ * @cookie: transaction identifier to check status of
+ * @last: returns last completed cookie, can be NULL
+ * @used: returns last issued cookie, can be NULL
+ *
+ * If @last and @used are passed in, upon return they reflect the driver
+ * internal state and can be used with dma_async_is_complete() to check
+ * the status of multiple cookies without re-checking hardware state.
+ */
+static inline enum dma_status dma_async_memcpy_complete(struct dma_chan *chan,
+		dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
+{
+	return chan->device->device_memcpy_complete(chan, cookie, last, used);
+}
+
+/**
+ * dma_async_is_complete - test a cookie against chan state
+ * @cookie: transaction identifier to test status of
+ * @last_complete: last known completed transaction
+ * @last_used: last cookie value handed out
+ *
+ * dma_async_is_complete() is used in dma_async_memcpy_complete();
+ * the test logic is separated for lightweight testing of multiple cookies.
+ */
+static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
+		dma_cookie_t last_complete, dma_cookie_t last_used)
+{
+	if (last_complete <= last_used) {
+		if ((cookie <= last_complete) || (cookie > last_used))
+			return DMA_SUCCESS;
+	} else {
+		if ((cookie <= last_complete) && (cookie > last_used))
+			return DMA_SUCCESS;
+	}
+	return DMA_IN_PROGRESS;
+}
+
+/* --- DMA device --- */
+
+int dma_async_device_register(struct dma_device *device);
+void dma_async_device_unregister(struct dma_device *device);
+
+#endif /* CONFIG_DMA_ENGINE */
+#endif /* DMAENGINE_H */
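
The cookie scheme above rewards batching: one dma_async_memcpy_complete() call refreshes the channel's last-completed/last-issued state, and dma_async_is_complete() can then test any number of cookies against that snapshot, wraparound included. A hypothetical helper (the name and loop are illustrative, not from the patch) might look like this:

/* Returns 1 when every cookie in the batch has completed, 0 otherwise.
 * Only the first call touches driver/hardware state; the per-cookie
 * checks are pure arithmetic on the snapshot, and dma_async_is_complete()
 * handles the signed 32-bit cookie counter wrapping past last_complete.
 */
static int example_batch_done(struct dma_chan *chan,
		dma_cookie_t *cookies, int n)
{
	dma_cookie_t last_complete, last_used;
	int i;

	/* snapshot the channel's completion state once */
	dma_async_memcpy_complete(chan, cookies[0], &last_complete, &last_used);

	for (i = 0; i < n; i++)
		if (dma_async_is_complete(cookies[i], last_complete,
					  last_used) != DMA_SUCCESS)
			return 0;
	return 1;
}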