Diffstat (limited to 'drivers/dma/tegra20-apb-dma.c')
-rw-r--r--	drivers/dma/tegra20-apb-dma.c	1410
1 file changed, 1410 insertions, 0 deletions
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
new file mode 100644
index 000000000000..134ea7cfc795
--- /dev/null
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -0,0 +1,1410 @@
1 | /* | ||
2 | * DMA driver for Nvidia's Tegra20 APB DMA controller. | ||
3 | * | ||
4 | * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #include <linux/bitops.h> | ||
20 | #include <linux/clk.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/dmaengine.h> | ||
23 | #include <linux/dma-mapping.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/io.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/of.h> | ||
30 | #include <linux/of_device.h> | ||
31 | #include <linux/platform_device.h> | ||
32 | #include <linux/pm_runtime.h> | ||
33 | #include <linux/slab.h> | ||
34 | |||
35 | #include <mach/clk.h> | ||
36 | #include "dmaengine.h" | ||
37 | |||
38 | #define TEGRA_APBDMA_GENERAL 0x0 | ||
39 | #define TEGRA_APBDMA_GENERAL_ENABLE BIT(31) | ||
40 | |||
41 | #define TEGRA_APBDMA_CONTROL 0x010 | ||
42 | #define TEGRA_APBDMA_IRQ_MASK 0x01c | ||
43 | #define TEGRA_APBDMA_IRQ_MASK_SET 0x020 | ||
44 | |||
45 | /* CSR register */ | ||
46 | #define TEGRA_APBDMA_CHAN_CSR 0x00 | ||
47 | #define TEGRA_APBDMA_CSR_ENB BIT(31) | ||
48 | #define TEGRA_APBDMA_CSR_IE_EOC BIT(30) | ||
49 | #define TEGRA_APBDMA_CSR_HOLD BIT(29) | ||
50 | #define TEGRA_APBDMA_CSR_DIR BIT(28) | ||
51 | #define TEGRA_APBDMA_CSR_ONCE BIT(27) | ||
52 | #define TEGRA_APBDMA_CSR_FLOW BIT(21) | ||
53 | #define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT 16 | ||
54 | #define TEGRA_APBDMA_CSR_WCOUNT_MASK 0xFFFC | ||
55 | |||
56 | /* STATUS register */ | ||
57 | #define TEGRA_APBDMA_CHAN_STATUS 0x004 | ||
58 | #define TEGRA_APBDMA_STATUS_BUSY BIT(31) | ||
59 | #define TEGRA_APBDMA_STATUS_ISE_EOC BIT(30) | ||
60 | #define TEGRA_APBDMA_STATUS_HALT BIT(29) | ||
61 | #define TEGRA_APBDMA_STATUS_PING_PONG BIT(28) | ||
62 | #define TEGRA_APBDMA_STATUS_COUNT_SHIFT 2 | ||
63 | #define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC | ||
64 | |||
65 | /* AHB memory address */ | ||
66 | #define TEGRA_APBDMA_CHAN_AHBPTR 0x010 | ||
67 | |||
68 | /* AHB sequence register */ | ||
69 | #define TEGRA_APBDMA_CHAN_AHBSEQ 0x14 | ||
70 | #define TEGRA_APBDMA_AHBSEQ_INTR_ENB BIT(31) | ||
71 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8 (0 << 28) | ||
72 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16 (1 << 28) | ||
73 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32 (2 << 28) | ||
74 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64 (3 << 28) | ||
75 | #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128 (4 << 28) | ||
76 | #define TEGRA_APBDMA_AHBSEQ_DATA_SWAP BIT(27) | ||
77 | #define TEGRA_APBDMA_AHBSEQ_BURST_1 (4 << 24) | ||
78 | #define TEGRA_APBDMA_AHBSEQ_BURST_4 (5 << 24) | ||
79 | #define TEGRA_APBDMA_AHBSEQ_BURST_8 (6 << 24) | ||
80 | #define TEGRA_APBDMA_AHBSEQ_DBL_BUF BIT(19) | ||
81 | #define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT 16 | ||
82 | #define TEGRA_APBDMA_AHBSEQ_WRAP_NONE 0 | ||
83 | |||
84 | /* APB address */ | ||
85 | #define TEGRA_APBDMA_CHAN_APBPTR 0x018 | ||
86 | |||
87 | /* APB sequence register */ | ||
88 | #define TEGRA_APBDMA_CHAN_APBSEQ 0x01c | ||
89 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8 (0 << 28) | ||
90 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16 (1 << 28) | ||
91 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32 (2 << 28) | ||
92 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64 (3 << 28) | ||
93 | #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128 (4 << 28) | ||
94 | #define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27) | ||
95 | #define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16) | ||
96 | |||
97 | /* | ||
98 | * If any burst is in flight when the DMA is paused, this is the time (in | ||
99 | * microseconds) needed to complete the in-flight burst and update the DMA status register. | ||
100 | */ | ||
101 | #define TEGRA_APBDMA_BURST_COMPLETE_TIME 20 | ||
102 | |||
103 | /* Channel base address offset from APBDMA base address */ | ||
104 | #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000 | ||
105 | |||
106 | /* DMA channel register space size */ | ||
107 | #define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE 0x20 | ||
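/*
 * Each channel's registers occupy one such 0x20-byte window; for channel
 * 'id' the window starts at:
 *   chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
 *                      id * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE
 * (see tegra_dma_probe() and the tdc_read()/tdc_write() accessors).
 */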
108 | |||
109 | struct tegra_dma; | ||
110 | |||
111 | /* | ||
112 | * tegra_dma_chip_data Tegra chip specific DMA data | ||
113 | * @nr_channels: Number of channels available in the controller. | ||
114 | * @max_dma_count: Maximum DMA transfer count supported by DMA controller. | ||
115 | */ | ||
116 | struct tegra_dma_chip_data { | ||
117 | int nr_channels; | ||
118 | int max_dma_count; | ||
119 | }; | ||
120 | |||
121 | /* DMA channel registers */ | ||
122 | struct tegra_dma_channel_regs { | ||
123 | unsigned long csr; | ||
124 | unsigned long ahb_ptr; | ||
125 | unsigned long apb_ptr; | ||
126 | unsigned long ahb_seq; | ||
127 | unsigned long apb_seq; | ||
128 | }; | ||
129 | |||
130 | /* | ||
131 | * tegra_dma_sg_req: DMA request details used to configure the hardware. This | ||
132 | * contains the details of one sub-transfer as programmed into the DMA hw. | ||
133 | * A client's request for data transfer can be broken into multiple | ||
134 | * sub-transfers, depending on the requester details and hw support. | ||
135 | * Each sub-transfer is added to the list of transfers and points to the Tegra | ||
136 | * DMA descriptor which manages the overall transfer details. | ||
137 | */ | ||
138 | struct tegra_dma_sg_req { | ||
139 | struct tegra_dma_channel_regs ch_regs; | ||
140 | int req_len; | ||
141 | bool configured; | ||
142 | bool last_sg; | ||
143 | bool half_done; | ||
144 | struct list_head node; | ||
145 | struct tegra_dma_desc *dma_desc; | ||
146 | }; | ||
147 | |||
148 | /* | ||
149 | * tegra_dma_desc: Tegra DMA descriptor which manages a client request. | ||
150 | * This descriptor keeps track of transfer status, callbacks, request | ||
151 | * counts, etc. | ||
152 | */ | ||
153 | struct tegra_dma_desc { | ||
154 | struct dma_async_tx_descriptor txd; | ||
155 | int bytes_requested; | ||
156 | int bytes_transferred; | ||
157 | enum dma_status dma_status; | ||
158 | struct list_head node; | ||
159 | struct list_head tx_list; | ||
160 | struct list_head cb_node; | ||
161 | int cb_count; | ||
162 | }; | ||
163 | |||
164 | struct tegra_dma_channel; | ||
165 | |||
166 | typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc, | ||
167 | bool to_terminate); | ||
168 | |||
169 | /* tegra_dma_channel: Channel specific information */ | ||
170 | struct tegra_dma_channel { | ||
171 | struct dma_chan dma_chan; | ||
172 | bool config_init; | ||
173 | int id; | ||
174 | int irq; | ||
175 | unsigned long chan_base_offset; | ||
176 | spinlock_t lock; | ||
177 | bool busy; | ||
178 | struct tegra_dma *tdma; | ||
179 | bool cyclic; | ||
180 | |||
181 | /* Different lists for managing the requests */ | ||
182 | struct list_head free_sg_req; | ||
183 | struct list_head pending_sg_req; | ||
184 | struct list_head free_dma_desc; | ||
185 | struct list_head cb_desc; | ||
186 | |||
187 | /* ISR handler and tasklet for bottom half of isr handling */ | ||
188 | dma_isr_handler isr_handler; | ||
189 | struct tasklet_struct tasklet; | ||
190 | dma_async_tx_callback callback; | ||
191 | void *callback_param; | ||
192 | |||
193 | /* Channel-slave specific configuration */ | ||
194 | struct dma_slave_config dma_sconfig; | ||
195 | }; | ||
196 | |||
197 | /* tegra_dma: Tegra DMA specific information */ | ||
198 | struct tegra_dma { | ||
199 | struct dma_device dma_dev; | ||
200 | struct device *dev; | ||
201 | struct clk *dma_clk; | ||
202 | spinlock_t global_lock; | ||
203 | void __iomem *base_addr; | ||
204 | struct tegra_dma_chip_data *chip_data; | ||
205 | |||
206 | /* Some registers need to be cached before suspend */ | ||
207 | u32 reg_gen; | ||
208 | |||
209 | /* Last member of the structure */ | ||
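/*
 * The channels[] array is sized at probe time: the structure is
 * allocated with room for chip_data->nr_channels entries (see
 * tegra_dma_probe()).
 */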
210 | struct tegra_dma_channel channels[0]; | ||
211 | }; | ||
212 | |||
213 | static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val) | ||
214 | { | ||
215 | writel(val, tdma->base_addr + reg); | ||
216 | } | ||
217 | |||
218 | static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg) | ||
219 | { | ||
220 | return readl(tdma->base_addr + reg); | ||
221 | } | ||
222 | |||
223 | static inline void tdc_write(struct tegra_dma_channel *tdc, | ||
224 | u32 reg, u32 val) | ||
225 | { | ||
226 | writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg); | ||
227 | } | ||
228 | |||
229 | static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg) | ||
230 | { | ||
231 | return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg); | ||
232 | } | ||
233 | |||
234 | static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc) | ||
235 | { | ||
236 | return container_of(dc, struct tegra_dma_channel, dma_chan); | ||
237 | } | ||
238 | |||
239 | static inline struct tegra_dma_desc *txd_to_tegra_dma_desc( | ||
240 | struct dma_async_tx_descriptor *td) | ||
241 | { | ||
242 | return container_of(td, struct tegra_dma_desc, txd); | ||
243 | } | ||
244 | |||
245 | static inline struct device *tdc2dev(struct tegra_dma_channel *tdc) | ||
246 | { | ||
247 | return &tdc->dma_chan.dev->device; | ||
248 | } | ||
249 | |||
250 | static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx); | ||
251 | static int tegra_dma_runtime_suspend(struct device *dev); | ||
252 | static int tegra_dma_runtime_resume(struct device *dev); | ||
253 | |||
254 | /* Get DMA desc from free list, if not there then allocate it. */ | ||
255 | static struct tegra_dma_desc *tegra_dma_desc_get( | ||
256 | struct tegra_dma_channel *tdc) | ||
257 | { | ||
258 | struct tegra_dma_desc *dma_desc; | ||
259 | unsigned long flags; | ||
260 | |||
261 | spin_lock_irqsave(&tdc->lock, flags); | ||
262 | |||
263 | /* Do not reuse a descriptor which is still waiting for an ack */ | ||
264 | list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { | ||
265 | if (async_tx_test_ack(&dma_desc->txd)) { | ||
266 | list_del(&dma_desc->node); | ||
267 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
268 | return dma_desc; | ||
269 | } | ||
270 | } | ||
271 | |||
272 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
273 | |||
274 | /* Allocate DMA desc */ | ||
275 | dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC); | ||
276 | if (!dma_desc) { | ||
277 | dev_err(tdc2dev(tdc), "dma_desc alloc failed\n"); | ||
278 | return NULL; | ||
279 | } | ||
280 | |||
281 | dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan); | ||
282 | dma_desc->txd.tx_submit = tegra_dma_tx_submit; | ||
283 | dma_desc->txd.flags = 0; | ||
284 | return dma_desc; | ||
285 | } | ||
286 | |||
287 | static void tegra_dma_desc_put(struct tegra_dma_channel *tdc, | ||
288 | struct tegra_dma_desc *dma_desc) | ||
289 | { | ||
290 | unsigned long flags; | ||
291 | |||
292 | spin_lock_irqsave(&tdc->lock, flags); | ||
293 | if (!list_empty(&dma_desc->tx_list)) | ||
294 | list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req); | ||
295 | list_add_tail(&dma_desc->node, &tdc->free_dma_desc); | ||
296 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
297 | } | ||
298 | |||
299 | static struct tegra_dma_sg_req *tegra_dma_sg_req_get( | ||
300 | struct tegra_dma_channel *tdc) | ||
301 | { | ||
302 | struct tegra_dma_sg_req *sg_req = NULL; | ||
303 | unsigned long flags; | ||
304 | |||
305 | spin_lock_irqsave(&tdc->lock, flags); | ||
306 | if (!list_empty(&tdc->free_sg_req)) { | ||
307 | sg_req = list_first_entry(&tdc->free_sg_req, | ||
308 | typeof(*sg_req), node); | ||
309 | list_del(&sg_req->node); | ||
310 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
311 | return sg_req; | ||
312 | } | ||
313 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
314 | |||
315 | sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC); | ||
316 | if (!sg_req) | ||
317 | dev_err(tdc2dev(tdc), "sg_req alloc failed\n"); | ||
318 | return sg_req; | ||
319 | } | ||
320 | |||
321 | static int tegra_dma_slave_config(struct dma_chan *dc, | ||
322 | struct dma_slave_config *sconfig) | ||
323 | { | ||
324 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
325 | |||
326 | if (!list_empty(&tdc->pending_sg_req)) { | ||
327 | dev_err(tdc2dev(tdc), "Configuration not allowed\n"); | ||
328 | return -EBUSY; | ||
329 | } | ||
330 | |||
331 | memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig)); | ||
332 | tdc->config_init = true; | ||
333 | return 0; | ||
334 | } | ||
335 | |||
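/*
 * Global pause/resume: the driver pauses a channel by clearing the
 * controller-wide enable bit in the GENERAL register. The global_lock
 * is taken in tegra_dma_global_pause() and released in
 * tegra_dma_global_resume() so that concurrent pause/resume sequences
 * on other channels cannot re-enable the controller early.
 */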
336 | static void tegra_dma_global_pause(struct tegra_dma_channel *tdc, | ||
337 | bool wait_for_burst_complete) | ||
338 | { | ||
339 | struct tegra_dma *tdma = tdc->tdma; | ||
340 | |||
341 | spin_lock(&tdma->global_lock); | ||
342 | tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0); | ||
343 | if (wait_for_burst_complete) | ||
344 | udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); | ||
345 | } | ||
346 | |||
347 | static void tegra_dma_global_resume(struct tegra_dma_channel *tdc) | ||
348 | { | ||
349 | struct tegra_dma *tdma = tdc->tdma; | ||
350 | |||
351 | tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE); | ||
352 | spin_unlock(&tdma->global_lock); | ||
353 | } | ||
354 | |||
355 | static void tegra_dma_stop(struct tegra_dma_channel *tdc) | ||
356 | { | ||
357 | u32 csr; | ||
358 | u32 status; | ||
359 | |||
360 | /* Disable interrupts */ | ||
361 | csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR); | ||
362 | csr &= ~TEGRA_APBDMA_CSR_IE_EOC; | ||
363 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr); | ||
364 | |||
365 | /* Disable DMA */ | ||
366 | csr &= ~TEGRA_APBDMA_CSR_ENB; | ||
367 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr); | ||
368 | |||
369 | /* Clear interrupt status if it is there */ | ||
370 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | ||
371 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { | ||
372 | dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__); | ||
373 | tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status); | ||
374 | } | ||
375 | tdc->busy = false; | ||
376 | } | ||
377 | |||
378 | static void tegra_dma_start(struct tegra_dma_channel *tdc, | ||
379 | struct tegra_dma_sg_req *sg_req) | ||
380 | { | ||
381 | struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs; | ||
382 | |||
383 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr); | ||
384 | tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq); | ||
385 | tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr); | ||
386 | tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq); | ||
387 | tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr); | ||
388 | |||
389 | /* Start DMA */ | ||
390 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, | ||
391 | ch_regs->csr | TEGRA_APBDMA_CSR_ENB); | ||
392 | } | ||
393 | |||
394 | static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, | ||
395 | struct tegra_dma_sg_req *nsg_req) | ||
396 | { | ||
397 | unsigned long status; | ||
398 | |||
399 | /* | ||
400 | * The DMA controller reloads the new configuration for the next transfer | ||
401 | * after the last burst of the current transfer completes. | ||
402 | * If there is no EOC interrupt status yet, the last burst has not | ||
403 | * completed. The last burst may be in flight and finish while the | ||
404 | * DMA is paused, but in that case it neither generates the interrupt | ||
405 | * nor reloads the new configuration. | ||
406 | * If the EOC interrupt status is already set, the interrupt handler | ||
407 | * needs to load the new configuration instead. | ||
408 | * Pausing first makes checking the status and reprogramming atomic. | ||
409 | */ | ||
410 | tegra_dma_global_pause(tdc, false); | ||
411 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | ||
412 | |||
413 | /* | ||
414 | * If an interrupt is pending then do nothing, as the ISR will handle | ||
415 | * the programming for the new request. | ||
416 | */ | ||
417 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { | ||
418 | dev_err(tdc2dev(tdc), | ||
419 | "Skipping new configuration as interrupt is pending\n"); | ||
420 | tegra_dma_global_resume(tdc); | ||
421 | return; | ||
422 | } | ||
423 | |||
424 | /* Safe to program new configuration */ | ||
425 | tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr); | ||
426 | tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr); | ||
427 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, | ||
428 | nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB); | ||
429 | nsg_req->configured = true; | ||
430 | |||
431 | tegra_dma_global_resume(tdc); | ||
432 | } | ||
433 | |||
434 | static void tdc_start_head_req(struct tegra_dma_channel *tdc) | ||
435 | { | ||
436 | struct tegra_dma_sg_req *sg_req; | ||
437 | |||
438 | if (list_empty(&tdc->pending_sg_req)) | ||
439 | return; | ||
440 | |||
441 | sg_req = list_first_entry(&tdc->pending_sg_req, | ||
442 | typeof(*sg_req), node); | ||
443 | tegra_dma_start(tdc, sg_req); | ||
444 | sg_req->configured = true; | ||
445 | tdc->busy = true; | ||
446 | } | ||
447 | |||
448 | static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc) | ||
449 | { | ||
450 | struct tegra_dma_sg_req *hsgreq; | ||
451 | struct tegra_dma_sg_req *hnsgreq; | ||
452 | |||
453 | if (list_empty(&tdc->pending_sg_req)) | ||
454 | return; | ||
455 | |||
456 | hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); | ||
457 | if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) { | ||
458 | hnsgreq = list_first_entry(&hsgreq->node, | ||
459 | typeof(*hnsgreq), node); | ||
460 | tegra_dma_configure_for_next(tdc, hnsgreq); | ||
461 | } | ||
462 | } | ||
463 | |||
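/*
 * The calculation below assumes that the STATUS COUNT field (bits 15:2)
 * starts at the programmed WCOUNT value, i.e. req_len - 4, and counts
 * down by 4 for every word transferred, so:
 *   transferred = req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4
 */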
464 | static inline int get_current_xferred_count(struct tegra_dma_channel *tdc, | ||
465 | struct tegra_dma_sg_req *sg_req, unsigned long status) | ||
466 | { | ||
467 | return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4; | ||
468 | } | ||
469 | |||
470 | static void tegra_dma_abort_all(struct tegra_dma_channel *tdc) | ||
471 | { | ||
472 | struct tegra_dma_sg_req *sgreq; | ||
473 | struct tegra_dma_desc *dma_desc; | ||
474 | |||
475 | while (!list_empty(&tdc->pending_sg_req)) { | ||
476 | sgreq = list_first_entry(&tdc->pending_sg_req, | ||
477 | typeof(*sgreq), node); | ||
478 | list_del(&sgreq->node); | ||
479 | list_add_tail(&sgreq->node, &tdc->free_sg_req); | ||
480 | if (sgreq->last_sg) { | ||
481 | dma_desc = sgreq->dma_desc; | ||
482 | dma_desc->dma_status = DMA_ERROR; | ||
483 | list_add_tail(&dma_desc->node, &tdc->free_dma_desc); | ||
484 | |||
485 | /* Add in cb list if it is not there. */ | ||
486 | if (!dma_desc->cb_count) | ||
487 | list_add_tail(&dma_desc->cb_node, | ||
488 | &tdc->cb_desc); | ||
489 | dma_desc->cb_count++; | ||
490 | } | ||
491 | } | ||
492 | tdc->isr_handler = NULL; | ||
493 | } | ||
494 | |||
495 | static bool handle_continuous_head_request(struct tegra_dma_channel *tdc, | ||
496 | struct tegra_dma_sg_req *last_sg_req, bool to_terminate) | ||
497 | { | ||
498 | struct tegra_dma_sg_req *hsgreq = NULL; | ||
499 | |||
500 | if (list_empty(&tdc->pending_sg_req)) { | ||
501 | dev_err(tdc2dev(tdc), "Dma is running without req\n"); | ||
502 | tegra_dma_stop(tdc); | ||
503 | return false; | ||
504 | } | ||
505 | |||
506 | /* | ||
507 | * The head request on the list should already be in flight. | ||
508 | * If it is not, abort the transfer, as the transfer loop | ||
509 | * cannot continue. | ||
510 | */ | ||
511 | hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); | ||
512 | if (!hsgreq->configured) { | ||
513 | tegra_dma_stop(tdc); | ||
514 | dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n"); | ||
515 | tegra_dma_abort_all(tdc); | ||
516 | return false; | ||
517 | } | ||
518 | |||
519 | /* Configure next request */ | ||
520 | if (!to_terminate) | ||
521 | tdc_configure_next_head_desc(tdc); | ||
522 | return true; | ||
523 | } | ||
524 | |||
525 | static void handle_once_dma_done(struct tegra_dma_channel *tdc, | ||
526 | bool to_terminate) | ||
527 | { | ||
528 | struct tegra_dma_sg_req *sgreq; | ||
529 | struct tegra_dma_desc *dma_desc; | ||
530 | |||
531 | tdc->busy = false; | ||
532 | sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); | ||
533 | dma_desc = sgreq->dma_desc; | ||
534 | dma_desc->bytes_transferred += sgreq->req_len; | ||
535 | |||
536 | list_del(&sgreq->node); | ||
537 | if (sgreq->last_sg) { | ||
538 | dma_desc->dma_status = DMA_SUCCESS; | ||
539 | dma_cookie_complete(&dma_desc->txd); | ||
540 | if (!dma_desc->cb_count) | ||
541 | list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); | ||
542 | dma_desc->cb_count++; | ||
543 | list_add_tail(&dma_desc->node, &tdc->free_dma_desc); | ||
544 | } | ||
545 | list_add_tail(&sgreq->node, &tdc->free_sg_req); | ||
546 | |||
547 | /* Do not start DMA if it is going to be terminated */ | ||
548 | if (to_terminate || list_empty(&tdc->pending_sg_req)) | ||
549 | return; | ||
550 | |||
551 | tdc_start_head_req(tdc); | ||
552 | return; | ||
553 | } | ||
554 | |||
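/*
 * Cyclic (continuous single-buffer) completion: the finished sg_req is
 * re-queued at the tail of the pending list so the ring of period-sized
 * sub-transfers keeps looping, while handle_continuous_head_request()
 * checks that the new head is already in flight and pre-programs the
 * one after it.
 */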
555 | static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc, | ||
556 | bool to_terminate) | ||
557 | { | ||
558 | struct tegra_dma_sg_req *sgreq; | ||
559 | struct tegra_dma_desc *dma_desc; | ||
560 | bool st; | ||
561 | |||
562 | sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); | ||
563 | dma_desc = sgreq->dma_desc; | ||
564 | dma_desc->bytes_transferred += sgreq->req_len; | ||
565 | |||
566 | /* Callback needs to be called */ | ||
567 | if (!dma_desc->cb_count) | ||
568 | list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); | ||
569 | dma_desc->cb_count++; | ||
570 | |||
571 | /* If not last req then put at end of pending list */ | ||
572 | if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) { | ||
573 | list_del(&sgreq->node); | ||
574 | list_add_tail(&sgreq->node, &tdc->pending_sg_req); | ||
575 | sgreq->configured = false; | ||
576 | st = handle_continuous_head_request(tdc, sgreq, to_terminate); | ||
577 | if (!st) | ||
578 | dma_desc->dma_status = DMA_ERROR; | ||
579 | } | ||
580 | return; | ||
581 | } | ||
582 | |||
583 | static void tegra_dma_tasklet(unsigned long data) | ||
584 | { | ||
585 | struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data; | ||
586 | dma_async_tx_callback callback = NULL; | ||
587 | void *callback_param = NULL; | ||
588 | struct tegra_dma_desc *dma_desc; | ||
589 | unsigned long flags; | ||
590 | int cb_count; | ||
591 | |||
592 | spin_lock_irqsave(&tdc->lock, flags); | ||
593 | while (!list_empty(&tdc->cb_desc)) { | ||
594 | dma_desc = list_first_entry(&tdc->cb_desc, | ||
595 | typeof(*dma_desc), cb_node); | ||
596 | list_del(&dma_desc->cb_node); | ||
597 | callback = dma_desc->txd.callback; | ||
598 | callback_param = dma_desc->txd.callback_param; | ||
599 | cb_count = dma_desc->cb_count; | ||
600 | dma_desc->cb_count = 0; | ||
601 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
602 | while (cb_count-- && callback) | ||
603 | callback(callback_param); | ||
604 | spin_lock_irqsave(&tdc->lock, flags); | ||
605 | } | ||
606 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
607 | } | ||
608 | |||
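/*
 * Interrupt handling is split in two: the hard IRQ handler below acks
 * the EOC status and runs the mode-specific isr_handler under the
 * channel lock, then schedules the tasklet; tegra_dma_tasklet() above
 * invokes the client callbacks queued on cb_desc with the lock dropped
 * around each callback.
 */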
609 | static irqreturn_t tegra_dma_isr(int irq, void *dev_id) | ||
610 | { | ||
611 | struct tegra_dma_channel *tdc = dev_id; | ||
612 | unsigned long status; | ||
613 | unsigned long flags; | ||
614 | |||
615 | spin_lock_irqsave(&tdc->lock, flags); | ||
616 | |||
617 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | ||
618 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { | ||
619 | tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status); | ||
620 | tdc->isr_handler(tdc, false); | ||
621 | tasklet_schedule(&tdc->tasklet); | ||
622 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
623 | return IRQ_HANDLED; | ||
624 | } | ||
625 | |||
626 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
627 | dev_info(tdc2dev(tdc), | ||
628 | "Interrupt already served status 0x%08lx\n", status); | ||
629 | return IRQ_NONE; | ||
630 | } | ||
631 | |||
632 | static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd) | ||
633 | { | ||
634 | struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd); | ||
635 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan); | ||
636 | unsigned long flags; | ||
637 | dma_cookie_t cookie; | ||
638 | |||
639 | spin_lock_irqsave(&tdc->lock, flags); | ||
640 | dma_desc->dma_status = DMA_IN_PROGRESS; | ||
641 | cookie = dma_cookie_assign(&dma_desc->txd); | ||
642 | list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req); | ||
643 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
644 | return cookie; | ||
645 | } | ||
646 | |||
647 | static void tegra_dma_issue_pending(struct dma_chan *dc) | ||
648 | { | ||
649 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
650 | unsigned long flags; | ||
651 | |||
652 | spin_lock_irqsave(&tdc->lock, flags); | ||
653 | if (list_empty(&tdc->pending_sg_req)) { | ||
654 | dev_err(tdc2dev(tdc), "No DMA request\n"); | ||
655 | goto end; | ||
656 | } | ||
657 | if (!tdc->busy) { | ||
658 | tdc_start_head_req(tdc); | ||
659 | |||
660 | /* Continuous single mode: Configure next req */ | ||
661 | if (tdc->cyclic) { | ||
662 | /* | ||
663 | * Wait one burst time so the DMA can be configured | ||
664 | * for the next transfer. | ||
665 | */ | ||
666 | udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); | ||
667 | tdc_configure_next_head_desc(tdc); | ||
668 | } | ||
669 | } | ||
670 | end: | ||
671 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
672 | return; | ||
673 | } | ||
674 | |||
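/*
 * Terminate sequence: globally pause the DMA, let isr_handler() drain
 * any already-raised EOC, stop the channel, credit the partially
 * transferred bytes of the head request via get_current_xferred_count(),
 * resume the controller, and finally move all pending requests back to
 * the free lists with tegra_dma_abort_all().
 */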
675 | static void tegra_dma_terminate_all(struct dma_chan *dc) | ||
676 | { | ||
677 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
678 | struct tegra_dma_sg_req *sgreq; | ||
679 | struct tegra_dma_desc *dma_desc; | ||
680 | unsigned long flags; | ||
681 | unsigned long status; | ||
682 | bool was_busy; | ||
683 | |||
684 | spin_lock_irqsave(&tdc->lock, flags); | ||
685 | if (list_empty(&tdc->pending_sg_req)) { | ||
686 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
687 | return; | ||
688 | } | ||
689 | |||
690 | if (!tdc->busy) | ||
691 | goto skip_dma_stop; | ||
692 | |||
693 | /* Pause DMA before checking the queue status */ | ||
694 | tegra_dma_global_pause(tdc, true); | ||
695 | |||
696 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | ||
697 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { | ||
698 | dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__); | ||
699 | tdc->isr_handler(tdc, true); | ||
700 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | ||
701 | } | ||
702 | |||
703 | was_busy = tdc->busy; | ||
704 | tegra_dma_stop(tdc); | ||
705 | |||
706 | if (!list_empty(&tdc->pending_sg_req) && was_busy) { | ||
707 | sgreq = list_first_entry(&tdc->pending_sg_req, | ||
708 | typeof(*sgreq), node); | ||
709 | sgreq->dma_desc->bytes_transferred += | ||
710 | get_current_xferred_count(tdc, sgreq, status); | ||
711 | } | ||
712 | tegra_dma_global_resume(tdc); | ||
713 | |||
714 | skip_dma_stop: | ||
715 | tegra_dma_abort_all(tdc); | ||
716 | |||
717 | while (!list_empty(&tdc->cb_desc)) { | ||
718 | dma_desc = list_first_entry(&tdc->cb_desc, | ||
719 | typeof(*dma_desc), cb_node); | ||
720 | list_del(&dma_desc->cb_node); | ||
721 | dma_desc->cb_count = 0; | ||
722 | } | ||
723 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
724 | } | ||
725 | |||
726 | static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, | ||
727 | dma_cookie_t cookie, struct dma_tx_state *txstate) | ||
728 | { | ||
729 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
730 | struct tegra_dma_desc *dma_desc; | ||
731 | struct tegra_dma_sg_req *sg_req; | ||
732 | enum dma_status ret; | ||
733 | unsigned long flags; | ||
734 | |||
735 | spin_lock_irqsave(&tdc->lock, flags); | ||
736 | |||
737 | ret = dma_cookie_status(dc, cookie, txstate); | ||
738 | if (ret == DMA_SUCCESS) { | ||
739 | dma_set_residue(txstate, 0); | ||
740 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
741 | return ret; | ||
742 | } | ||
743 | |||
744 | /* Check on wait_ack desc status */ | ||
745 | list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { | ||
746 | if (dma_desc->txd.cookie == cookie) { | ||
747 | dma_set_residue(txstate, | ||
748 | dma_desc->bytes_requested - | ||
749 | dma_desc->bytes_transferred); | ||
750 | ret = dma_desc->dma_status; | ||
751 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
752 | return ret; | ||
753 | } | ||
754 | } | ||
755 | |||
756 | /* Check in pending list */ | ||
757 | list_for_each_entry(sg_req, &tdc->pending_sg_req, node) { | ||
758 | dma_desc = sg_req->dma_desc; | ||
759 | if (dma_desc->txd.cookie == cookie) { | ||
760 | dma_set_residue(txstate, | ||
761 | dma_desc->bytes_requested - | ||
762 | dma_desc->bytes_transferred); | ||
763 | ret = dma_desc->dma_status; | ||
764 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
765 | return ret; | ||
766 | } | ||
767 | } | ||
768 | |||
769 | dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie); | ||
770 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
771 | return ret; | ||
772 | } | ||
773 | |||
774 | static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd, | ||
775 | unsigned long arg) | ||
776 | { | ||
777 | switch (cmd) { | ||
778 | case DMA_SLAVE_CONFIG: | ||
779 | return tegra_dma_slave_config(dc, | ||
780 | (struct dma_slave_config *)arg); | ||
781 | |||
782 | case DMA_TERMINATE_ALL: | ||
783 | tegra_dma_terminate_all(dc); | ||
784 | return 0; | ||
785 | |||
786 | default: | ||
787 | break; | ||
788 | } | ||
789 | |||
790 | return -ENXIO; | ||
791 | } | ||
792 | |||
793 | static inline int get_bus_width(struct tegra_dma_channel *tdc, | ||
794 | enum dma_slave_buswidth slave_bw) | ||
795 | { | ||
796 | switch (slave_bw) { | ||
797 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
798 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8; | ||
799 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
800 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16; | ||
801 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
802 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32; | ||
803 | case DMA_SLAVE_BUSWIDTH_8_BYTES: | ||
804 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64; | ||
805 | default: | ||
806 | dev_warn(tdc2dev(tdc), | ||
807 | "slave bw is not supported, using 32bits\n"); | ||
808 | return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32; | ||
809 | } | ||
810 | } | ||
811 | |||
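/*
 * Worked example (illustrative): with slave_bw = DMA_SLAVE_BUSWIDTH_4_BYTES
 * and burst_size = 8, burst_byte = 32 and burst_ahb_width = 8, which
 * selects TEGRA_APBDMA_AHBSEQ_BURST_8. A burst_size of 0 falls back to
 * the largest burst the transfer length is aligned to (32 bytes -> 8,
 * 16 bytes -> 4, otherwise 1).
 */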
812 | static inline int get_burst_size(struct tegra_dma_channel *tdc, | ||
813 | u32 burst_size, enum dma_slave_buswidth slave_bw, int len) | ||
814 | { | ||
815 | int burst_byte; | ||
816 | int burst_ahb_width; | ||
817 | |||
818 | /* | ||
819 | * burst_size from the client is in units of the bus width. | ||
820 | * Convert it into AHB bus width units, which are 4 bytes. | ||
821 | */ | ||
822 | burst_byte = burst_size * slave_bw; | ||
823 | burst_ahb_width = burst_byte / 4; | ||
824 | |||
825 | /* If burst size is 0 then calculate the burst size based on length */ | ||
826 | if (!burst_ahb_width) { | ||
827 | if (len & 0xF) | ||
828 | return TEGRA_APBDMA_AHBSEQ_BURST_1; | ||
829 | else if ((len >> 4) & 0x1) | ||
830 | return TEGRA_APBDMA_AHBSEQ_BURST_4; | ||
831 | else | ||
832 | return TEGRA_APBDMA_AHBSEQ_BURST_8; | ||
833 | } | ||
834 | if (burst_ahb_width < 4) | ||
835 | return TEGRA_APBDMA_AHBSEQ_BURST_1; | ||
836 | else if (burst_ahb_width < 8) | ||
837 | return TEGRA_APBDMA_AHBSEQ_BURST_4; | ||
838 | else | ||
839 | return TEGRA_APBDMA_AHBSEQ_BURST_8; | ||
840 | } | ||
841 | |||
842 | static int get_transfer_param(struct tegra_dma_channel *tdc, | ||
843 | enum dma_transfer_direction direction, unsigned long *apb_addr, | ||
844 | unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size, | ||
845 | enum dma_slave_buswidth *slave_bw) | ||
846 | { | ||
847 | |||
848 | switch (direction) { | ||
849 | case DMA_MEM_TO_DEV: | ||
850 | *apb_addr = tdc->dma_sconfig.dst_addr; | ||
851 | *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width); | ||
852 | *burst_size = tdc->dma_sconfig.dst_maxburst; | ||
853 | *slave_bw = tdc->dma_sconfig.dst_addr_width; | ||
854 | *csr = TEGRA_APBDMA_CSR_DIR; | ||
855 | return 0; | ||
856 | |||
857 | case DMA_DEV_TO_MEM: | ||
858 | *apb_addr = tdc->dma_sconfig.src_addr; | ||
859 | *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width); | ||
860 | *burst_size = tdc->dma_sconfig.src_maxburst; | ||
861 | *slave_bw = tdc->dma_sconfig.src_addr_width; | ||
862 | *csr = 0; | ||
863 | return 0; | ||
864 | |||
865 | default: | ||
866 | dev_err(tdc2dev(tdc), "Dma direction is not supported\n"); | ||
867 | return -EINVAL; | ||
868 | } | ||
869 | return -EINVAL; | ||
870 | } | ||
871 | |||
872 | static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( | ||
873 | struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len, | ||
874 | enum dma_transfer_direction direction, unsigned long flags, | ||
875 | void *context) | ||
876 | { | ||
877 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
878 | struct tegra_dma_desc *dma_desc; | ||
879 | unsigned int i; | ||
880 | struct scatterlist *sg; | ||
881 | unsigned long csr, ahb_seq, apb_ptr, apb_seq; | ||
882 | struct list_head req_list; | ||
883 | struct tegra_dma_sg_req *sg_req = NULL; | ||
884 | u32 burst_size; | ||
885 | enum dma_slave_buswidth slave_bw; | ||
886 | int ret; | ||
887 | |||
888 | if (!tdc->config_init) { | ||
889 | dev_err(tdc2dev(tdc), "dma channel is not configured\n"); | ||
890 | return NULL; | ||
891 | } | ||
892 | if (sg_len < 1) { | ||
893 | dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len); | ||
894 | return NULL; | ||
895 | } | ||
896 | |||
897 | ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr, | ||
898 | &burst_size, &slave_bw); | ||
899 | if (ret < 0) | ||
900 | return NULL; | ||
901 | |||
902 | INIT_LIST_HEAD(&req_list); | ||
903 | |||
904 | ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB; | ||
905 | ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE << | ||
906 | TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; | ||
907 | ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; | ||
908 | |||
909 | csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW; | ||
910 | csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; | ||
911 | if (flags & DMA_PREP_INTERRUPT) | ||
912 | csr |= TEGRA_APBDMA_CSR_IE_EOC; | ||
913 | |||
914 | apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; | ||
915 | |||
916 | dma_desc = tegra_dma_desc_get(tdc); | ||
917 | if (!dma_desc) { | ||
918 | dev_err(tdc2dev(tdc), "Dma descriptors not available\n"); | ||
919 | return NULL; | ||
920 | } | ||
921 | INIT_LIST_HEAD(&dma_desc->tx_list); | ||
922 | INIT_LIST_HEAD(&dma_desc->cb_node); | ||
923 | dma_desc->cb_count = 0; | ||
924 | dma_desc->bytes_requested = 0; | ||
925 | dma_desc->bytes_transferred = 0; | ||
926 | dma_desc->dma_status = DMA_IN_PROGRESS; | ||
927 | |||
928 | /* Make transfer requests */ | ||
929 | for_each_sg(sgl, sg, sg_len, i) { | ||
930 | u32 len, mem; | ||
931 | |||
932 | mem = sg_phys(sg); | ||
933 | len = sg_dma_len(sg); | ||
934 | |||
935 | if ((len & 3) || (mem & 3) || | ||
936 | (len > tdc->tdma->chip_data->max_dma_count)) { | ||
937 | dev_err(tdc2dev(tdc), | ||
938 | "Dma length/memory address is not supported\n"); | ||
939 | tegra_dma_desc_put(tdc, dma_desc); | ||
940 | return NULL; | ||
941 | } | ||
942 | |||
943 | sg_req = tegra_dma_sg_req_get(tdc); | ||
944 | if (!sg_req) { | ||
945 | dev_err(tdc2dev(tdc), "Dma sg-req not available\n"); | ||
946 | tegra_dma_desc_put(tdc, dma_desc); | ||
947 | return NULL; | ||
948 | } | ||
949 | |||
950 | ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len); | ||
951 | dma_desc->bytes_requested += len; | ||
952 | |||
953 | sg_req->ch_regs.apb_ptr = apb_ptr; | ||
954 | sg_req->ch_regs.ahb_ptr = mem; | ||
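/*
 * The CSR WCOUNT field is programmed as (len - 4) in bits 15:2, i.e.
 * the number of 32-bit words minus one; len was checked above to be a
 * multiple of 4.
 */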
955 | sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC); | ||
956 | sg_req->ch_regs.apb_seq = apb_seq; | ||
957 | sg_req->ch_regs.ahb_seq = ahb_seq; | ||
958 | sg_req->configured = false; | ||
959 | sg_req->last_sg = false; | ||
960 | sg_req->dma_desc = dma_desc; | ||
961 | sg_req->req_len = len; | ||
962 | |||
963 | list_add_tail(&sg_req->node, &dma_desc->tx_list); | ||
964 | } | ||
965 | sg_req->last_sg = true; | ||
966 | if (flags & DMA_CTRL_ACK) | ||
967 | dma_desc->txd.flags = DMA_CTRL_ACK; | ||
968 | |||
969 | /* | ||
970 | * Make sure that the requested mode does not conflict with the | ||
971 | * currently configured mode. | ||
972 | */ | ||
973 | if (!tdc->isr_handler) { | ||
974 | tdc->isr_handler = handle_once_dma_done; | ||
975 | tdc->cyclic = false; | ||
976 | } else { | ||
977 | if (tdc->cyclic) { | ||
978 | dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n"); | ||
979 | tegra_dma_desc_put(tdc, dma_desc); | ||
980 | return NULL; | ||
981 | } | ||
982 | } | ||
983 | |||
984 | return &dma_desc->txd; | ||
985 | } | ||
986 | |||
987 | static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( | ||
988 | struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, | ||
989 | size_t period_len, enum dma_transfer_direction direction, | ||
990 | void *context) | ||
991 | { | ||
992 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
993 | struct tegra_dma_desc *dma_desc = NULL; | ||
994 | struct tegra_dma_sg_req *sg_req = NULL; | ||
995 | unsigned long csr, ahb_seq, apb_ptr, apb_seq; | ||
996 | int len; | ||
997 | size_t remain_len; | ||
998 | dma_addr_t mem = buf_addr; | ||
999 | u32 burst_size; | ||
1000 | enum dma_slave_buswidth slave_bw; | ||
1001 | int ret; | ||
1002 | |||
1003 | if (!buf_len || !period_len) { | ||
1004 | dev_err(tdc2dev(tdc), "Invalid buffer/period len\n"); | ||
1005 | return NULL; | ||
1006 | } | ||
1007 | |||
1008 | if (!tdc->config_init) { | ||
1009 | dev_err(tdc2dev(tdc), "DMA slave is not configured\n"); | ||
1010 | return NULL; | ||
1011 | } | ||
1012 | |||
1013 | /* | ||
1014 | * More requests can be queued as long as the DMA has not been | ||
1015 | * started; the driver will loop over all of them. | ||
1016 | * Once the DMA has started, new requests can be queued only after | ||
1017 | * terminating the DMA. | ||
1018 | */ | ||
1019 | if (tdc->busy) { | ||
1020 | dev_err(tdc2dev(tdc), "Request not allowed when dma running\n"); | ||
1021 | return NULL; | ||
1022 | } | ||
1023 | |||
1024 | /* | ||
1025 | * Cyclic transfers are only supported when buf_len is a multiple | ||
1026 | * of period_len. | ||
1027 | */ | ||
1028 | if (buf_len % period_len) { | ||
1029 | dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n"); | ||
1030 | return NULL; | ||
1031 | } | ||
1032 | |||
1033 | len = period_len; | ||
1034 | if ((len & 3) || (buf_addr & 3) || | ||
1035 | (len > tdc->tdma->chip_data->max_dma_count)) { | ||
1036 | dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n"); | ||
1037 | return NULL; | ||
1038 | } | ||
1039 | |||
1040 | ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr, | ||
1041 | &burst_size, &slave_bw); | ||
1042 | if (ret < 0) | ||
1043 | return NULL; | ||
1044 | |||
1045 | |||
1046 | ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB; | ||
1047 | ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE << | ||
1048 | TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; | ||
1049 | ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; | ||
1050 | |||
1051 | csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC; | ||
1052 | csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; | ||
1053 | |||
1054 | apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; | ||
1055 | |||
1056 | dma_desc = tegra_dma_desc_get(tdc); | ||
1057 | if (!dma_desc) { | ||
1058 | dev_err(tdc2dev(tdc), "not enough descriptors available\n"); | ||
1059 | return NULL; | ||
1060 | } | ||
1061 | |||
1062 | INIT_LIST_HEAD(&dma_desc->tx_list); | ||
1063 | INIT_LIST_HEAD(&dma_desc->cb_node); | ||
1064 | dma_desc->cb_count = 0; | ||
1065 | |||
1066 | dma_desc->bytes_transferred = 0; | ||
1067 | dma_desc->bytes_requested = buf_len; | ||
1068 | remain_len = buf_len; | ||
1069 | |||
1070 | /* Split the transfer into period-sized chunks */ | ||
1071 | while (remain_len) { | ||
1072 | sg_req = tegra_dma_sg_req_get(tdc); | ||
1073 | if (!sg_req) { | ||
1074 | dev_err(tdc2dev(tdc), "Dma sg-req not available\n"); | ||
1075 | tegra_dma_desc_put(tdc, dma_desc); | ||
1076 | return NULL; | ||
1077 | } | ||
1078 | |||
1079 | ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len); | ||
1080 | sg_req->ch_regs.apb_ptr = apb_ptr; | ||
1081 | sg_req->ch_regs.ahb_ptr = mem; | ||
1082 | sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC); | ||
1083 | sg_req->ch_regs.apb_seq = apb_seq; | ||
1084 | sg_req->ch_regs.ahb_seq = ahb_seq; | ||
1085 | sg_req->configured = false; | ||
1086 | sg_req->half_done = false; | ||
1087 | sg_req->last_sg = false; | ||
1088 | sg_req->dma_desc = dma_desc; | ||
1089 | sg_req->req_len = len; | ||
1090 | |||
1091 | list_add_tail(&sg_req->node, &dma_desc->tx_list); | ||
1092 | remain_len -= len; | ||
1093 | mem += len; | ||
1094 | } | ||
1095 | sg_req->last_sg = true; | ||
1096 | dma_desc->txd.flags = DMA_CTRL_ACK; | ||
1097 | |||
1098 | /* | ||
1099 | * Make sure that the requested mode does not conflict with the | ||
1100 | * currently configured mode. | ||
1101 | */ | ||
1102 | if (!tdc->isr_handler) { | ||
1103 | tdc->isr_handler = handle_cont_sngl_cycle_dma_done; | ||
1104 | tdc->cyclic = true; | ||
1105 | } else { | ||
1106 | if (!tdc->cyclic) { | ||
1107 | dev_err(tdc2dev(tdc), "DMA configuration conflict\n"); | ||
1108 | tegra_dma_desc_put(tdc, dma_desc); | ||
1109 | return NULL; | ||
1110 | } | ||
1111 | } | ||
1112 | |||
1113 | return &dma_desc->txd; | ||
1114 | } | ||
1115 | |||
1116 | static int tegra_dma_alloc_chan_resources(struct dma_chan *dc) | ||
1117 | { | ||
1118 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
1119 | |||
1120 | dma_cookie_init(&tdc->dma_chan); | ||
1121 | tdc->config_init = false; | ||
1122 | return 0; | ||
1123 | } | ||
1124 | |||
1125 | static void tegra_dma_free_chan_resources(struct dma_chan *dc) | ||
1126 | { | ||
1127 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | ||
1128 | |||
1129 | struct tegra_dma_desc *dma_desc; | ||
1130 | struct tegra_dma_sg_req *sg_req; | ||
1131 | struct list_head dma_desc_list; | ||
1132 | struct list_head sg_req_list; | ||
1133 | unsigned long flags; | ||
1134 | |||
1135 | INIT_LIST_HEAD(&dma_desc_list); | ||
1136 | INIT_LIST_HEAD(&sg_req_list); | ||
1137 | |||
1138 | dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id); | ||
1139 | |||
1140 | if (tdc->busy) | ||
1141 | tegra_dma_terminate_all(dc); | ||
1142 | |||
1143 | spin_lock_irqsave(&tdc->lock, flags); | ||
1144 | list_splice_init(&tdc->pending_sg_req, &sg_req_list); | ||
1145 | list_splice_init(&tdc->free_sg_req, &sg_req_list); | ||
1146 | list_splice_init(&tdc->free_dma_desc, &dma_desc_list); | ||
1147 | INIT_LIST_HEAD(&tdc->cb_desc); | ||
1148 | tdc->config_init = false; | ||
1149 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
1150 | |||
1151 | while (!list_empty(&dma_desc_list)) { | ||
1152 | dma_desc = list_first_entry(&dma_desc_list, | ||
1153 | typeof(*dma_desc), node); | ||
1154 | list_del(&dma_desc->node); | ||
1155 | kfree(dma_desc); | ||
1156 | } | ||
1157 | |||
1158 | while (!list_empty(&sg_req_list)) { | ||
1159 | sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node); | ||
1160 | list_del(&sg_req->node); | ||
1161 | kfree(sg_req); | ||
1162 | } | ||
1163 | } | ||
1164 | |||
1165 | /* Tegra20 specific DMA controller information */ | ||
1166 | static struct tegra_dma_chip_data tegra20_dma_chip_data = { | ||
1167 | .nr_channels = 16, | ||
1168 | .max_dma_count = 1024UL * 64, | ||
1169 | }; | ||
1170 | |||
1171 | #if defined(CONFIG_OF) | ||
1172 | /* Tegra30 specific DMA controller information */ | ||
1173 | static struct tegra_dma_chip_data tegra30_dma_chip_data = { | ||
1174 | .nr_channels = 32, | ||
1175 | .max_dma_count = 1024UL * 64, | ||
1176 | }; | ||
1177 | |||
1178 | static const struct of_device_id tegra_dma_of_match[] __devinitconst = { | ||
1179 | { | ||
1180 | .compatible = "nvidia,tegra30-apbdma-new", | ||
1181 | .data = &tegra30_dma_chip_data, | ||
1182 | }, { | ||
1183 | .compatible = "nvidia,tegra20-apbdma-new", | ||
1184 | .data = &tegra20_dma_chip_data, | ||
1185 | }, { | ||
1186 | }, | ||
1187 | }; | ||
1188 | MODULE_DEVICE_TABLE(of, tegra_dma_of_match); | ||
1189 | #endif | ||
1190 | |||
1191 | static int __devinit tegra_dma_probe(struct platform_device *pdev) | ||
1192 | { | ||
1193 | struct resource *res; | ||
1194 | struct tegra_dma *tdma; | ||
1195 | int ret; | ||
1196 | int i; | ||
1197 | struct tegra_dma_chip_data *cdata = NULL; | ||
1198 | |||
1199 | if (pdev->dev.of_node) { | ||
1200 | const struct of_device_id *match; | ||
1201 | match = of_match_device(of_match_ptr(tegra_dma_of_match), | ||
1202 | &pdev->dev); | ||
1203 | if (!match) { | ||
1204 | dev_err(&pdev->dev, "Error: No device match found\n"); | ||
1205 | return -ENODEV; | ||
1206 | } | ||
1207 | cdata = match->data; | ||
1208 | } else { | ||
1209 | /* If there is no device tree then fall back to Tegra20 */ | ||
1210 | cdata = &tegra20_dma_chip_data; | ||
1211 | } | ||
1212 | |||
1213 | tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * | ||
1214 | sizeof(struct tegra_dma_channel), GFP_KERNEL); | ||
1215 | if (!tdma) { | ||
1216 | dev_err(&pdev->dev, "Error: memory allocation failed\n"); | ||
1217 | return -ENOMEM; | ||
1218 | } | ||
1219 | |||
1220 | tdma->dev = &pdev->dev; | ||
1221 | tdma->chip_data = cdata; | ||
1222 | platform_set_drvdata(pdev, tdma); | ||
1223 | |||
1224 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1225 | if (!res) { | ||
1226 | dev_err(&pdev->dev, "No mem resource for DMA\n"); | ||
1227 | return -EINVAL; | ||
1228 | } | ||
1229 | |||
1230 | tdma->base_addr = devm_request_and_ioremap(&pdev->dev, res); | ||
1231 | if (!tdma->base_addr) { | ||
1232 | dev_err(&pdev->dev, | ||
1233 | "Cannot request memregion/iomap dma address\n"); | ||
1234 | return -EADDRNOTAVAIL; | ||
1235 | } | ||
1236 | |||
1237 | tdma->dma_clk = devm_clk_get(&pdev->dev, NULL); | ||
1238 | if (IS_ERR(tdma->dma_clk)) { | ||
1239 | dev_err(&pdev->dev, "Error: Missing controller clock\n"); | ||
1240 | return PTR_ERR(tdma->dma_clk); | ||
1241 | } | ||
1242 | |||
1243 | spin_lock_init(&tdma->global_lock); | ||
1244 | |||
1245 | pm_runtime_enable(&pdev->dev); | ||
1246 | if (!pm_runtime_enabled(&pdev->dev)) { | ||
1247 | ret = tegra_dma_runtime_resume(&pdev->dev); | ||
1248 | if (ret) { | ||
1249 | dev_err(&pdev->dev, "dma_runtime_resume failed %d\n", | ||
1250 | ret); | ||
1251 | goto err_pm_disable; | ||
1252 | } | ||
1253 | } | ||
1254 | |||
1255 | /* Reset DMA controller */ | ||
1256 | tegra_periph_reset_assert(tdma->dma_clk); | ||
1257 | udelay(2); | ||
1258 | tegra_periph_reset_deassert(tdma->dma_clk); | ||
1259 | |||
1260 | /* Enable global DMA registers */ | ||
1261 | tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE); | ||
1262 | tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); | ||
1263 | tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); | ||
1264 | |||
1265 | INIT_LIST_HEAD(&tdma->dma_dev.channels); | ||
1266 | for (i = 0; i < cdata->nr_channels; i++) { | ||
1267 | struct tegra_dma_channel *tdc = &tdma->channels[i]; | ||
1268 | char irq_name[30]; | ||
1269 | |||
1270 | tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + | ||
1271 | i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE; | ||
1272 | |||
1273 | res = platform_get_resource(pdev, IORESOURCE_IRQ, i); | ||
1274 | if (!res) { | ||
1275 | ret = -EINVAL; | ||
1276 | dev_err(&pdev->dev, "No irq resource for chan %d\n", i); | ||
1277 | goto err_irq; | ||
1278 | } | ||
1279 | tdc->irq = res->start; | ||
1280 | snprintf(irq_name, sizeof(irq_name), "apbdma.%d", i); | ||
1281 | ret = devm_request_irq(&pdev->dev, tdc->irq, | ||
1282 | tegra_dma_isr, 0, irq_name, tdc); | ||
1283 | if (ret) { | ||
1284 | dev_err(&pdev->dev, | ||
1285 | "request_irq failed with err %d channel %d\n", | ||
1286 | ret, i); | ||
1287 | goto err_irq; | ||
1288 | } | ||
1289 | |||
1290 | tdc->dma_chan.device = &tdma->dma_dev; | ||
1291 | dma_cookie_init(&tdc->dma_chan); | ||
1292 | list_add_tail(&tdc->dma_chan.device_node, | ||
1293 | &tdma->dma_dev.channels); | ||
1294 | tdc->tdma = tdma; | ||
1295 | tdc->id = i; | ||
1296 | |||
1297 | tasklet_init(&tdc->tasklet, tegra_dma_tasklet, | ||
1298 | (unsigned long)tdc); | ||
1299 | spin_lock_init(&tdc->lock); | ||
1300 | |||
1301 | INIT_LIST_HEAD(&tdc->pending_sg_req); | ||
1302 | INIT_LIST_HEAD(&tdc->free_sg_req); | ||
1303 | INIT_LIST_HEAD(&tdc->free_dma_desc); | ||
1304 | INIT_LIST_HEAD(&tdc->cb_desc); | ||
1305 | } | ||
1306 | |||
1307 | dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask); | ||
1308 | dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask); | ||
1309 | tdma->dma_dev.dev = &pdev->dev; | ||
1310 | tdma->dma_dev.device_alloc_chan_resources = | ||
1311 | tegra_dma_alloc_chan_resources; | ||
1312 | tdma->dma_dev.device_free_chan_resources = | ||
1313 | tegra_dma_free_chan_resources; | ||
1314 | tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg; | ||
1315 | tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic; | ||
1316 | tdma->dma_dev.device_control = tegra_dma_device_control; | ||
1317 | tdma->dma_dev.device_tx_status = tegra_dma_tx_status; | ||
1318 | tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending; | ||
1319 | |||
1320 | ret = dma_async_device_register(&tdma->dma_dev); | ||
1321 | if (ret < 0) { | ||
1322 | dev_err(&pdev->dev, | ||
1323 | "Tegra20 APB DMA driver registration failed %d\n", ret); | ||
1324 | goto err_irq; | ||
1325 | } | ||
1326 | |||
1327 | dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n", | ||
1328 | cdata->nr_channels); | ||
1329 | return 0; | ||
1330 | |||
1331 | err_irq: | ||
1332 | while (--i >= 0) { | ||
1333 | struct tegra_dma_channel *tdc = &tdma->channels[i]; | ||
1334 | tasklet_kill(&tdc->tasklet); | ||
1335 | } | ||
1336 | |||
1337 | err_pm_disable: | ||
1338 | pm_runtime_disable(&pdev->dev); | ||
1339 | if (!pm_runtime_status_suspended(&pdev->dev)) | ||
1340 | tegra_dma_runtime_suspend(&pdev->dev); | ||
1341 | return ret; | ||
1342 | } | ||
1343 | |||
1344 | static int __devexit tegra_dma_remove(struct platform_device *pdev) | ||
1345 | { | ||
1346 | struct tegra_dma *tdma = platform_get_drvdata(pdev); | ||
1347 | int i; | ||
1348 | struct tegra_dma_channel *tdc; | ||
1349 | |||
1350 | dma_async_device_unregister(&tdma->dma_dev); | ||
1351 | |||
1352 | for (i = 0; i < tdma->chip_data->nr_channels; ++i) { | ||
1353 | tdc = &tdma->channels[i]; | ||
1354 | tasklet_kill(&tdc->tasklet); | ||
1355 | } | ||
1356 | |||
1357 | pm_runtime_disable(&pdev->dev); | ||
1358 | if (!pm_runtime_status_suspended(&pdev->dev)) | ||
1359 | tegra_dma_runtime_suspend(&pdev->dev); | ||
1360 | |||
1361 | return 0; | ||
1362 | } | ||
1363 | |||
1364 | static int tegra_dma_runtime_suspend(struct device *dev) | ||
1365 | { | ||
1366 | struct platform_device *pdev = to_platform_device(dev); | ||
1367 | struct tegra_dma *tdma = platform_get_drvdata(pdev); | ||
1368 | |||
1369 | clk_disable(tdma->dma_clk); | ||
1370 | return 0; | ||
1371 | } | ||
1372 | |||
1373 | static int tegra_dma_runtime_resume(struct device *dev) | ||
1374 | { | ||
1375 | struct platform_device *pdev = to_platform_device(dev); | ||
1376 | struct tegra_dma *tdma = platform_get_drvdata(pdev); | ||
1377 | int ret; | ||
1378 | |||
1379 | ret = clk_enable(tdma->dma_clk); | ||
1380 | if (ret < 0) { | ||
1381 | dev_err(dev, "clk_enable failed: %d\n", ret); | ||
1382 | return ret; | ||
1383 | } | ||
1384 | return 0; | ||
1385 | } | ||
1386 | |||
1387 | static const struct dev_pm_ops tegra_dma_dev_pm_ops = { | ||
1388 | #ifdef CONFIG_PM_RUNTIME | ||
1389 | .runtime_suspend = tegra_dma_runtime_suspend, | ||
1390 | .runtime_resume = tegra_dma_runtime_resume, | ||
1391 | #endif | ||
1392 | }; | ||
1393 | |||
1394 | static struct platform_driver tegra_dmac_driver = { | ||
1395 | .driver = { | ||
1396 | .name = "tegra20-apbdma", | ||
1397 | .owner = THIS_MODULE, | ||
1398 | .pm = &tegra_dma_dev_pm_ops, | ||
1399 | .of_match_table = of_match_ptr(tegra_dma_of_match), | ||
1400 | }, | ||
1401 | .probe = tegra_dma_probe, | ||
1402 | .remove = __devexit_p(tegra_dma_remove), | ||
1403 | }; | ||
1404 | |||
1405 | module_platform_driver(tegra_dmac_driver); | ||
1406 | |||
1407 | MODULE_ALIAS("platform:tegra20-apbdma"); | ||
1408 | MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver"); | ||
1409 | MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); | ||
1410 | MODULE_LICENSE("GPL v2"); | ||