author		Sinan Kaya <okaya@codeaurora.org>	2016-02-04 23:34:35 -0500
committer	Vinod Koul <vinod.koul@intel.com>	2016-03-10 21:12:30 -0500
commit		67a2003e060739747cfa3ea9b0d88b3d321ebf24 (patch)
tree		5540e43567a985c72ae9c564b5005e4dcdc881b0
parent		7f8f209fd6e09a07fd1820144452caba419cf2b4 (diff)
dmaengine: add Qualcomm Technologies HIDMA channel driver
This patch adds support for the HIDMA engine. The driver consists of two
logical blocks: the DMA engine interface and the low-level interface.
The hardware supports only memcpy/memset, and this driver exposes only
the memcpy interface; neither the hardware nor the driver supports the
slave interface.
Signed-off-by: Sinan Kaya <okaya@codeaurora.org>
Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
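
For context, the channel registered here is consumed through the generic
dmaengine client API rather than any driver-private calls. A minimal sketch
of such a client follows; example_hidma_memcpy() is a hypothetical helper
(not part of this patch), and dst/src are assumed to be bus addresses the
caller has already DMA-mapped:

#include <linux/dmaengine.h>

/*
 * Minimal dmaengine client sketch for a memcpy-only channel such as
 * HIDMA. Hypothetical helper, not part of this patch.
 */
static int example_hidma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;
	int ret = 0;

	/* Ask the dmaengine core for any channel advertising DMA_MEMCPY. */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* HIDMA implements only device_prep_dma_memcpy. */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -ENOMEM;
		goto out;
	}

	cookie = dmaengine_submit(tx);	/* lands in hidma_tx_submit() */
	dma_async_issue_pending(chan);	/* lands in hidma_issue_pending() */

	/* Poll until hidma_tx_status() reports completion. */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -EIO;
out:
	dma_release_channel(chan);
	return ret;
}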
-rw-r--r--	drivers/dma/qcom/Kconfig |  10
-rw-r--r--	drivers/dma/qcom/hidma.c | 706
-rw-r--r--	drivers/dma/qcom/hidma.h | 160
3 files changed, 876 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
index c975b1167acf..a7761c4025f4 100644
--- a/drivers/dma/qcom/Kconfig
+++ b/drivers/dma/qcom/Kconfig
@@ -17,3 +17,13 @@ config QCOM_HIDMA_MGMT
 	  start managing the channels. In a virtualized environment,
 	  the guest OS would run QCOM_HIDMA channel driver and the
 	  host would run the QCOM_HIDMA_MGMT management driver.
+
+config QCOM_HIDMA
+	tristate "Qualcomm Technologies HIDMA Channel support"
+	select DMA_ENGINE
+	help
+	  Enable support for the Qualcomm Technologies HIDMA controller.
+	  The HIDMA controller supports optimized buffer copies
+	  (user to kernel, kernel to kernel, etc.). It only supports
+	  memcpy interface. The core is not intended for general
+	  purpose slave DMA.
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
new file mode 100644
index 000000000000..cccc78efbca9
--- /dev/null
+++ b/drivers/dma/qcom/hidma.c
@@ -0,0 +1,706 @@
+/*
+ * Qualcomm Technologies HIDMA DMA engine interface
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
+ * Copyright (C) Semihalf 2009
+ * Copyright (C) Ilya Yanok, Emcraft Systems 2010
+ * Copyright (C) Alexander Popov, Promcontroller 2014
+ *
+ * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
+ * (defines, structures and comments) was taken from MPC5121 DMA driver
+ * written by Hongjun Chen <hong-jun.chen@freescale.com>.
+ *
+ * Approved as OSADL project by a majority of OSADL members and funded
+ * by OSADL membership fees in 2009; for details see www.osadl.org.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/* Linux Foundation elects GPLv2 license only. */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_dma.h>
+#include <linux/property.h>
+#include <linux/delay.h>
+#include <linux/acpi.h>
+#include <linux/irq.h>
+#include <linux/atomic.h>
+#include <linux/pm_runtime.h>
+
+#include "../dmaengine.h"
+#include "hidma.h"
+
+/*
+ * Default idle time is 2 seconds. This parameter can
+ * be overridden by changing the following
+ * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
+ * during kernel boot.
+ */
+#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
+#define HIDMA_ERR_INFO_SW			0xFF
+#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
+#define HIDMA_NR_DEFAULT_DESC			10
+
+static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
+{
+	return container_of(dmadev, struct hidma_dev, ddev);
+}
+
+static inline
+struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
+{
+	return container_of(_lldevp, struct hidma_dev, lldev);
+}
+
+static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
+{
+	return container_of(dmach, struct hidma_chan, chan);
+}
+
+static inline
+struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct hidma_desc, desc);
+}
+
+static void hidma_free(struct hidma_dev *dmadev)
+{
+	INIT_LIST_HEAD(&dmadev->ddev.channels);
+}
+
+static unsigned int nr_desc_prm;
+module_param(nr_desc_prm, uint, 0644);
+MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");
+
+
+/* process completed descriptors */
+static void hidma_process_completed(struct hidma_chan *mchan)
+{
+	struct dma_device *ddev = mchan->chan.device;
+	struct hidma_dev *mdma = to_hidma_dev(ddev);
+	struct dma_async_tx_descriptor *desc;
+	dma_cookie_t last_cookie;
+	struct hidma_desc *mdesc;
+	unsigned long irqflags;
+	struct list_head list;
+
+	INIT_LIST_HEAD(&list);
+
+	/* Get all completed descriptors */
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	list_splice_tail_init(&mchan->completed, &list);
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+	/* Execute callbacks and run dependencies */
+	list_for_each_entry(mdesc, &list, node) {
+		enum dma_status llstat;
+
+		desc = &mdesc->desc;
+
+		spin_lock_irqsave(&mchan->lock, irqflags);
+		dma_cookie_complete(desc);
+		spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
+		if (desc->callback && (llstat == DMA_COMPLETE))
+			desc->callback(desc->callback_param);
+
+		last_cookie = desc->cookie;
+		dma_run_dependencies(desc);
+	}
+
+	/* Free descriptors */
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	list_splice_tail_init(&list, &mchan->free);
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+}
+
+/*
+ * Called once for each submitted descriptor.
+ * PM is locked once for each descriptor that is currently
+ * in execution.
+ */
+static void hidma_callback(void *data)
+{
+	struct hidma_desc *mdesc = data;
+	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
+	struct dma_device *ddev = mchan->chan.device;
+	struct hidma_dev *dmadev = to_hidma_dev(ddev);
+	unsigned long irqflags;
+	bool queued = false;
+
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	if (mdesc->node.next) {
+		/* Delete from the active list, add to completed list */
+		list_move_tail(&mdesc->node, &mchan->completed);
+		queued = true;
+
+		/* calculate the next running descriptor */
+		mchan->running = list_first_entry(&mchan->active,
+						  struct hidma_desc, node);
+	}
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+	hidma_process_completed(mchan);
+
+	if (queued) {
+		pm_runtime_mark_last_busy(dmadev->ddev.dev);
+		pm_runtime_put_autosuspend(dmadev->ddev.dev);
+	}
+}
+
+static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
+{
+	struct hidma_chan *mchan;
+	struct dma_device *ddev;
+
+	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
+	if (!mchan)
+		return -ENOMEM;
+
+	ddev = &dmadev->ddev;
+	mchan->dma_sig = dma_sig;
+	mchan->dmadev = dmadev;
+	mchan->chan.device = ddev;
+	dma_cookie_init(&mchan->chan);
+
+	INIT_LIST_HEAD(&mchan->free);
+	INIT_LIST_HEAD(&mchan->prepared);
+	INIT_LIST_HEAD(&mchan->active);
+	INIT_LIST_HEAD(&mchan->completed);
+
+	spin_lock_init(&mchan->lock);
+	list_add_tail(&mchan->chan.device_node, &ddev->channels);
+	dmadev->ddev.chancnt++;
+	return 0;
+}
+
+static void hidma_issue_task(unsigned long arg)
+{
+	struct hidma_dev *dmadev = (struct hidma_dev *)arg;
+
+	pm_runtime_get_sync(dmadev->ddev.dev);
+	hidma_ll_start(dmadev->lldev);
+}
+
+static void hidma_issue_pending(struct dma_chan *dmach)
+{
+	struct hidma_chan *mchan = to_hidma_chan(dmach);
+	struct hidma_dev *dmadev = mchan->dmadev;
+	unsigned long flags;
+	int status;
+
+	spin_lock_irqsave(&mchan->lock, flags);
+	if (!mchan->running) {
+		struct hidma_desc *desc = list_first_entry(&mchan->active,
+							   struct hidma_desc,
+							   node);
+		mchan->running = desc;
+	}
+	spin_unlock_irqrestore(&mchan->lock, flags);
+
+	/* PM will be released in hidma_callback function. */
+	status = pm_runtime_get(dmadev->ddev.dev);
+	if (status < 0)
+		tasklet_schedule(&dmadev->task);
+	else
+		hidma_ll_start(dmadev->lldev);
+}
+
+static enum dma_status hidma_tx_status(struct dma_chan *dmach,
+				       dma_cookie_t cookie,
+				       struct dma_tx_state *txstate)
+{
+	struct hidma_chan *mchan = to_hidma_chan(dmach);
+	enum dma_status ret;
+
+	ret = dma_cookie_status(dmach, cookie, txstate);
+	if (ret == DMA_COMPLETE)
+		return ret;
+
+	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
+		unsigned long flags;
+		dma_cookie_t runcookie;
+
+		spin_lock_irqsave(&mchan->lock, flags);
+		if (mchan->running)
+			runcookie = mchan->running->desc.cookie;
+		else
+			runcookie = -EINVAL;
+
+		if (runcookie == cookie)
+			ret = DMA_PAUSED;
+
+		spin_unlock_irqrestore(&mchan->lock, flags);
+	}
+
+	return ret;
+}
+
+/*
+ * Submit descriptor to hardware.
+ * Lock the PM for each descriptor we are sending.
+ */
+static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
+	struct hidma_dev *dmadev = mchan->dmadev;
+	struct hidma_desc *mdesc;
+	unsigned long irqflags;
+	dma_cookie_t cookie;
+
+	pm_runtime_get_sync(dmadev->ddev.dev);
+	if (!hidma_ll_isenabled(dmadev->lldev)) {
+		pm_runtime_mark_last_busy(dmadev->ddev.dev);
+		pm_runtime_put_autosuspend(dmadev->ddev.dev);
+		return -ENODEV;
+	}
+
+	mdesc = container_of(txd, struct hidma_desc, desc);
+	spin_lock_irqsave(&mchan->lock, irqflags);
+
+	/* Move descriptor to active */
+	list_move_tail(&mdesc->node, &mchan->active);
+
+	/* Update cookie */
+	cookie = dma_cookie_assign(txd);
+
+	hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+	return cookie;
+}
+
+static int hidma_alloc_chan_resources(struct dma_chan *dmach)
+{
+	struct hidma_chan *mchan = to_hidma_chan(dmach);
+	struct hidma_dev *dmadev = mchan->dmadev;
+	struct hidma_desc *mdesc, *tmp;
+	unsigned long irqflags;
+	LIST_HEAD(descs);
+	unsigned int i;
+	int rc = 0;
+
+	if (mchan->allocated)
+		return 0;
+
+	/* Alloc descriptors for this channel */
+	for (i = 0; i < dmadev->nr_descriptors; i++) {
+		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
+		if (!mdesc) {
+			rc = -ENOMEM;
+			break;
+		}
+		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
+		mdesc->desc.tx_submit = hidma_tx_submit;
+
+		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
+				      "DMA engine", hidma_callback, mdesc,
+				      &mdesc->tre_ch);
+		if (rc) {
+			dev_err(dmach->device->dev,
+				"channel alloc failed at %u\n", i);
+			kfree(mdesc);
+			break;
+		}
+		list_add_tail(&mdesc->node, &descs);
+	}
+
+	if (rc) {
+		/* return the allocated descriptors */
+		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
+			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
+			kfree(mdesc);
+		}
+		return rc;
+	}
+
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	list_splice_tail_init(&descs, &mchan->free);
+	mchan->allocated = true;
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+	return 1;
+}
+
+static struct dma_async_tx_descriptor *
+hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
+		      size_t len, unsigned long flags)
+{
+	struct hidma_chan *mchan = to_hidma_chan(dmach);
+	struct hidma_desc *mdesc = NULL;
+	struct hidma_dev *mdma = mchan->dmadev;
+	unsigned long irqflags;
+
+	/* Get free descriptor */
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	if (!list_empty(&mchan->free)) {
+		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
+		list_del(&mdesc->node);
+	}
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+	if (!mdesc)
+		return NULL;
+
+	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
+				     src, dest, len, flags);
+
+	/* Place descriptor in prepared list */
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	list_add_tail(&mdesc->node, &mchan->prepared);
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+	return &mdesc->desc;
+}
+
+static int hidma_terminate_channel(struct dma_chan *chan)
+{
+	struct hidma_chan *mchan = to_hidma_chan(chan);
+	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
+	struct hidma_desc *tmp, *mdesc;
+	unsigned long irqflags;
+	LIST_HEAD(list);
+	int rc;
+
+	pm_runtime_get_sync(dmadev->ddev.dev);
+	/* give completed requests a chance to finish */
+	hidma_process_completed(mchan);
+
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	list_splice_init(&mchan->active, &list);
+	list_splice_init(&mchan->prepared, &list);
+	list_splice_init(&mchan->completed, &list);
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+	/* this suspends the existing transfer */
+	rc = hidma_ll_pause(dmadev->lldev);
+	if (rc) {
+		dev_err(dmadev->ddev.dev, "channel did not pause\n");
+		goto out;
+	}
+
+	/* return all user requests */
+	list_for_each_entry_safe(mdesc, tmp, &list, node) {
+		struct dma_async_tx_descriptor *txd = &mdesc->desc;
+		dma_async_tx_callback callback = mdesc->desc.callback;
+		void *param = mdesc->desc.callback_param;
+
+		dma_descriptor_unmap(txd);
+
+		if (callback)
+			callback(param);
+
+		dma_run_dependencies(txd);
+
+		/* move myself to free_list */
+		list_move(&mdesc->node, &mchan->free);
+	}
+
+	rc = hidma_ll_resume(dmadev->lldev);
+out:
+	pm_runtime_mark_last_busy(dmadev->ddev.dev);
+	pm_runtime_put_autosuspend(dmadev->ddev.dev);
+	return rc;
+}
+
+static int hidma_terminate_all(struct dma_chan *chan)
+{
+	struct hidma_chan *mchan = to_hidma_chan(chan);
+	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
+	int rc;
+
+	rc = hidma_terminate_channel(chan);
+	if (rc)
+		return rc;
+
+	/* reinitialize the hardware */
+	pm_runtime_get_sync(dmadev->ddev.dev);
+	rc = hidma_ll_setup(dmadev->lldev);
+	pm_runtime_mark_last_busy(dmadev->ddev.dev);
+	pm_runtime_put_autosuspend(dmadev->ddev.dev);
+	return rc;
+}
+
+static void hidma_free_chan_resources(struct dma_chan *dmach)
+{
+	struct hidma_chan *mchan = to_hidma_chan(dmach);
+	struct hidma_dev *mdma = mchan->dmadev;
+	struct hidma_desc *mdesc, *tmp;
+	unsigned long irqflags;
+	LIST_HEAD(descs);
+
+	/* terminate running transactions and free descriptors */
+	hidma_terminate_channel(dmach);
+
+	spin_lock_irqsave(&mchan->lock, irqflags);
+
+	/* Move data */
+	list_splice_tail_init(&mchan->free, &descs);
+
+	/* Free descriptors */
+	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
+		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
+		list_del(&mdesc->node);
+		kfree(mdesc);
+	}
+
+	mchan->allocated = 0;
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+}
+
+static int hidma_pause(struct dma_chan *chan)
+{
+	struct hidma_chan *mchan;
+	struct hidma_dev *dmadev;
+
+	mchan = to_hidma_chan(chan);
+	dmadev = to_hidma_dev(mchan->chan.device);
+	if (!mchan->paused) {
+		pm_runtime_get_sync(dmadev->ddev.dev);
+		if (hidma_ll_pause(dmadev->lldev))
+			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
+		mchan->paused = true;
+		pm_runtime_mark_last_busy(dmadev->ddev.dev);
+		pm_runtime_put_autosuspend(dmadev->ddev.dev);
+	}
+	return 0;
+}
+
+static int hidma_resume(struct dma_chan *chan)
+{
+	struct hidma_chan *mchan;
+	struct hidma_dev *dmadev;
+	int rc = 0;
+
+	mchan = to_hidma_chan(chan);
+	dmadev = to_hidma_dev(mchan->chan.device);
+	if (mchan->paused) {
+		pm_runtime_get_sync(dmadev->ddev.dev);
+		rc = hidma_ll_resume(dmadev->lldev);
+		if (!rc)
+			mchan->paused = false;
+		else
+			dev_err(dmadev->ddev.dev,
+				"failed to resume the channel");
+		pm_runtime_mark_last_busy(dmadev->ddev.dev);
+		pm_runtime_put_autosuspend(dmadev->ddev.dev);
+	}
+	return rc;
+}
+
+static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
+{
+	struct hidma_lldev *lldev = arg;
+
+	/*
+	 * All interrupts are request driven.
+	 * HW doesn't send an interrupt by itself.
+	 */
+	return hidma_ll_inthandler(chirq, lldev);
+}
+
+static int hidma_probe(struct platform_device *pdev)
+{
+	struct hidma_dev *dmadev;
+	struct resource *trca_resource;
+	struct resource *evca_resource;
+	int chirq;
+	void __iomem *evca;
+	void __iomem *trca;
+	int rc;
+
+	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	trca = devm_ioremap_resource(&pdev->dev, trca_resource);
+	if (IS_ERR(trca)) {
+		rc = -ENOMEM;
+		goto bailout;
+	}
+
+	evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	evca = devm_ioremap_resource(&pdev->dev, evca_resource);
+	if (IS_ERR(evca)) {
+		rc = -ENOMEM;
+		goto bailout;
+	}
+
+	/*
+	 * This driver only handles the channel IRQs.
+	 * Common IRQ is handled by the management driver.
+	 */
+	chirq = platform_get_irq(pdev, 0);
+	if (chirq < 0) {
+		rc = -ENODEV;
+		goto bailout;
+	}
+
+	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
+	if (!dmadev) {
+		rc = -ENOMEM;
+		goto bailout;
+	}
+
+	INIT_LIST_HEAD(&dmadev->ddev.channels);
+	spin_lock_init(&dmadev->lock);
+	dmadev->ddev.dev = &pdev->dev;
+	pm_runtime_get_sync(dmadev->ddev.dev);
+
+	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
+	if (WARN_ON(!pdev->dev.dma_mask)) {
+		rc = -ENXIO;
+		goto dmafree;
+	}
+
+	dmadev->dev_evca = evca;
+	dmadev->evca_resource = evca_resource;
+	dmadev->dev_trca = trca;
+	dmadev->trca_resource = trca_resource;
+	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
+	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
+	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
+	dmadev->ddev.device_tx_status = hidma_tx_status;
+	dmadev->ddev.device_issue_pending = hidma_issue_pending;
+	dmadev->ddev.device_pause = hidma_pause;
+	dmadev->ddev.device_resume = hidma_resume;
+	dmadev->ddev.device_terminate_all = hidma_terminate_all;
+	dmadev->ddev.copy_align = 8;
+
+	device_property_read_u32(&pdev->dev, "desc-count",
+				 &dmadev->nr_descriptors);
+
+	if (!dmadev->nr_descriptors && nr_desc_prm)
+		dmadev->nr_descriptors = nr_desc_prm;
+
+	if (!dmadev->nr_descriptors)
+		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;
+
+	dmadev->chidx = readl(dmadev->dev_trca + 0x28);
+
+	/* Set DMA mask to 64 bits. */
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rc) {
+		dev_warn(&pdev->dev, "unable to set coherent mask to 64");
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (rc)
+			goto dmafree;
+	}
+
+	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
+				      dmadev->nr_descriptors, dmadev->dev_trca,
+				      dmadev->dev_evca, dmadev->chidx);
+	if (!dmadev->lldev) {
+		rc = -EPROBE_DEFER;
+		goto dmafree;
+	}
+
+	rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
+			      "qcom-hidma", dmadev->lldev);
+	if (rc)
+		goto uninit;
+
+	INIT_LIST_HEAD(&dmadev->ddev.channels);
+	rc = hidma_chan_init(dmadev, 0);
+	if (rc)
+		goto uninit;
+
+	rc = dma_async_device_register(&dmadev->ddev);
+	if (rc)
+		goto uninit;
+
+	dmadev->irq = chirq;
+	tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
+	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
+	platform_set_drvdata(pdev, dmadev);
+	pm_runtime_mark_last_busy(dmadev->ddev.dev);
+	pm_runtime_put_autosuspend(dmadev->ddev.dev);
+	return 0;
+
+uninit:
+	hidma_ll_uninit(dmadev->lldev);
+dmafree:
+	if (dmadev)
+		hidma_free(dmadev);
+bailout:
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	return rc;
+}
+
+static int hidma_remove(struct platform_device *pdev)
+{
+	struct hidma_dev *dmadev = platform_get_drvdata(pdev);
+
+	pm_runtime_get_sync(dmadev->ddev.dev);
+	dma_async_device_unregister(&dmadev->ddev);
+	devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+	hidma_ll_uninit(dmadev->lldev);
+	hidma_free(dmadev);
+
+	dev_info(&pdev->dev, "HI-DMA engine removed\n");
+	pm_runtime_put_sync_suspend(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id hidma_acpi_ids[] = {
+	{"QCOM8061"},
+	{},
+};
+#endif
+
+static const struct of_device_id hidma_match[] = {
+	{.compatible = "qcom,hidma-1.0",},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, hidma_match);
+
+static struct platform_driver hidma_driver = {
+	.probe = hidma_probe,
+	.remove = hidma_remove,
+	.driver = {
+		   .name = "hidma",
+		   .of_match_table = hidma_match,
+		   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
+		   },
+};
+
+module_platform_driver(hidma_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
new file mode 100644
index 000000000000..231e306f6d87
--- /dev/null
+++ b/drivers/dma/qcom/hidma.h
@@ -0,0 +1,160 @@
+/*
+ * Qualcomm Technologies HIDMA data structures
+ *
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef QCOM_HIDMA_H
+#define QCOM_HIDMA_H
+
+#include <linux/kfifo.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+
+#define TRE_SIZE			32 /* each TRE is 32 bytes */
+#define TRE_CFG_IDX			0
+#define TRE_LEN_IDX			1
+#define TRE_SRC_LOW_IDX			2
+#define TRE_SRC_HI_IDX			3
+#define TRE_DEST_LOW_IDX		4
+#define TRE_DEST_HI_IDX			5
+
+struct hidma_tx_status {
+	u8 err_info;			/* error record in this transfer */
+	u8 err_code;			/* completion code */
+};
+
+struct hidma_tre {
+	atomic_t allocated;		/* if this channel is allocated */
+	bool queued;			/* flag whether this is pending */
+	u16 status;			/* status */
+	u32 chidx;			/* index of the tre */
+	u32 dma_sig;			/* signature of the tre */
+	const char *dev_name;		/* name of the device */
+	void (*callback)(void *data);	/* requester callback */
+	void *data;			/* Data associated with this channel*/
+	struct hidma_lldev *lldev;	/* lldma device pointer */
+	u32 tre_local[TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */
+	u32 tre_index;			/* the offset where this was written*/
+	u32 int_flags;			/* interrupt flags */
+};
+
+struct hidma_lldev {
+	bool initialized;		/* initialized flag */
+	u8 trch_state;			/* trch_state of the device */
+	u8 evch_state;			/* evch_state of the device */
+	u8 chidx;			/* channel index in the core */
+	u32 nr_tres;			/* max number of configs */
+	spinlock_t lock;		/* reentrancy */
+	struct hidma_tre *trepool;	/* trepool of user configs */
+	struct device *dev;		/* device */
+	void __iomem *trca;		/* Transfer Channel address */
+	void __iomem *evca;		/* Event Channel address */
+	struct hidma_tre
+		**pending_tre_list;	/* Pointers to pending TREs */
+	struct hidma_tx_status
+		*tx_status_list;	/* Pointers to pending TREs status*/
+	s32 pending_tre_count;		/* Number of TREs pending */
+
+	void *tre_ring;			/* TRE ring */
+	dma_addr_t tre_ring_handle;	/* TRE ring to be shared with HW */
+	u32 tre_ring_size;		/* Byte size of the ring */
+	u32 tre_processed_off;		/* last processed TRE */
+
+	void *evre_ring;		/* EVRE ring */
+	dma_addr_t evre_ring_handle;	/* EVRE ring to be shared with HW */
+	u32 evre_ring_size;		/* Byte size of the ring */
+	u32 evre_processed_off;		/* last processed EVRE */
+
+	u32 tre_write_offset;		/* TRE write location */
+	struct tasklet_struct task;	/* task delivering notifications */
+	DECLARE_KFIFO_PTR(handoff_fifo,
+			  struct hidma_tre *);	/* pending TREs FIFO */
+};
+
+struct hidma_desc {
+	struct dma_async_tx_descriptor desc;
+	/* link list node for this channel*/
+	struct list_head node;
+	u32 tre_ch;
+};
+
+struct hidma_chan {
+	bool paused;
+	bool allocated;
+	char dbg_name[16];
+	u32 dma_sig;
+
+	/*
+	 * active descriptor on this channel
+	 * It is used by the DMA complete notification to
+	 * locate the descriptor that initiated the transfer.
+	 */
+	struct dentry *debugfs;
+	struct dentry *stats;
+	struct hidma_dev *dmadev;
+	struct hidma_desc *running;
+
+	struct dma_chan chan;
+	struct list_head free;
+	struct list_head prepared;
+	struct list_head active;
+	struct list_head completed;
+
+	/* Lock for this structure */
+	spinlock_t lock;
+};
+
+struct hidma_dev {
+	int irq;
+	int chidx;
+	u32 nr_descriptors;
+
+	struct hidma_lldev *lldev;
+	void __iomem *dev_trca;
+	struct resource *trca_resource;
+	void __iomem *dev_evca;
+	struct resource *evca_resource;
+
+	/* used to protect the pending channel list*/
+	spinlock_t lock;
+	struct dma_device ddev;
+
+	struct dentry *debugfs;
+	struct dentry *stats;
+
+	/* Task delivering issue_pending */
+	struct tasklet_struct task;
+};
+
+int hidma_ll_request(struct hidma_lldev *llhndl, u32 dev_id,
+		     const char *dev_name,
+		     void (*callback)(void *data), void *data, u32 *tre_ch);
+
+void hidma_ll_free(struct hidma_lldev *llhndl, u32 tre_ch);
+enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch);
+bool hidma_ll_isenabled(struct hidma_lldev *llhndl);
+void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch);
+void hidma_ll_start(struct hidma_lldev *llhndl);
+int hidma_ll_pause(struct hidma_lldev *llhndl);
+int hidma_ll_resume(struct hidma_lldev *llhndl);
+void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
+	dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
+int hidma_ll_setup(struct hidma_lldev *lldev);
+struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
+			void __iomem *trca, void __iomem *evca,
+			u8 chidx);
+int hidma_ll_uninit(struct hidma_lldev *llhndl);
+irqreturn_t hidma_ll_inthandler(int irq, void *arg);
+void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
+				u8 err_code);
+#endif
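
The hidma_ll_* prototypes above form the contract between this DMA-engine
glue and the low-level driver, which lands in a separate patch. As a rough
orientation, here is a condensed sketch (not part of the patch) of the call
order hidma.c imposes on that contract; callback/data stand in for
hidma_callback() and a struct hidma_desc from hidma.c, and the descriptor
count of 10 mirrors HIDMA_NR_DEFAULT_DESC:

/*
 * Condensed sketch of the hidma_ll_* lifecycle as hidma.c drives it.
 * Illustrative only; error paths are simplified.
 */
static int hidma_ll_lifecycle_sketch(struct device *dev, void __iomem *trca,
				     void __iomem *evca, u8 chidx,
				     void (*callback)(void *data), void *data,
				     dma_addr_t src, dma_addr_t dest, u32 len)
{
	struct hidma_lldev *ll;
	u32 tre_ch;
	int rc;

	/* hidma_probe(): create the low-level context (10 TREs here) */
	ll = hidma_ll_init(dev, 10, trca, evca, chidx);
	if (!ll)
		return -ENODEV;

	/* hidma_alloc_chan_resources(): one TRE per descriptor */
	rc = hidma_ll_request(ll, 0, "DMA engine", callback, data, &tre_ch);
	if (rc)
		goto uninit;

	/* hidma_prep_dma_memcpy(): program src/dest/len into the TRE */
	hidma_ll_set_transfer_params(ll, tre_ch, src, dest, len, 0);

	/* hidma_tx_submit() then hidma_issue_pending(): queue and start */
	hidma_ll_queue_request(ll, tre_ch);
	hidma_ll_start(ll);

	/*
	 * Completion arrives through hidma_ll_inthandler() on the channel
	 * IRQ, which ends up invoking the registered callback.
	 */

	hidma_ll_free(ll, tre_ch);
uninit:
	hidma_ll_uninit(ll);
	return rc;
}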