Diffstat (limited to 'arch/arm/mach-tegra/dma.c')
-rw-r--r--    arch/arm/mach-tegra/dma.c    1203
1 files changed, 1203 insertions, 0 deletions

diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
new file mode 100644
index 00000000000..35499916e2b
--- /dev/null
+++ b/arch/arm/mach-tegra/dma.c
@@ -0,0 +1,1203 @@
/*
 * arch/arm/mach-tegra/dma.c
 *
 * System DMA driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2008-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/syscore_ops.h>
#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
#include <mach/clk.h>

#define APB_DMA_GEN			0x000
#define GEN_ENABLE			(1<<31)

#define APB_DMA_CNTRL			0x010

#define APB_DMA_IRQ_MASK		0x01c

#define APB_DMA_IRQ_MASK_SET		0x020

#define APB_DMA_CHAN_CSR		0x000
#define CSR_ENB				(1<<31)
#define CSR_IE_EOC			(1<<30)
#define CSR_HOLD			(1<<29)
#define CSR_DIR				(1<<28)
#define CSR_ONCE			(1<<27)
#define CSR_FLOW			(1<<21)
#define CSR_REQ_SEL_SHIFT		16
#define CSR_WCOUNT_SHIFT		2
#define CSR_WCOUNT_MASK			0xFFFC

#define APB_DMA_CHAN_STA		0x004
#define STA_BUSY			(1<<31)
#define STA_ISE_EOC			(1<<30)
#define STA_HALT			(1<<29)
#define STA_PING_PONG			(1<<28)
#define STA_COUNT_SHIFT			2
#define STA_COUNT_MASK			0xFFFC

#define APB_DMA_CHAN_AHB_PTR		0x010

#define APB_DMA_CHAN_AHB_SEQ		0x014
#define AHB_SEQ_INTR_ENB		(1<<31)
#define AHB_SEQ_BUS_WIDTH_SHIFT		28
#define AHB_SEQ_BUS_WIDTH_MASK		(0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_8		(0<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_16		(1<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_32		(2<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_64		(3<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_128		(4<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_DATA_SWAP		(1<<27)
#define AHB_SEQ_BURST_MASK		(0x7<<24)
#define AHB_SEQ_BURST_1			(4<<24)
#define AHB_SEQ_BURST_4			(5<<24)
#define AHB_SEQ_BURST_8			(6<<24)
#define AHB_SEQ_DBL_BUF			(1<<19)
#define AHB_SEQ_WRAP_SHIFT		16
#define AHB_SEQ_WRAP_MASK		(0x7<<AHB_SEQ_WRAP_SHIFT)

#define APB_DMA_CHAN_APB_PTR		0x018

#define APB_DMA_CHAN_APB_SEQ		0x01c
#define APB_SEQ_BUS_WIDTH_SHIFT		28
#define APB_SEQ_BUS_WIDTH_MASK		(0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_8		(0<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_16		(1<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_32		(2<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_64		(3<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_128		(4<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_DATA_SWAP		(1<<27)
#define APB_SEQ_WRAP_SHIFT		16
#define APB_SEQ_WRAP_MASK		(0x7<<APB_SEQ_WRAP_SHIFT)

#ifdef CONFIG_ARCH_TEGRA_2x_SOC
#define TEGRA_SYSTEM_DMA_CH_NR		16
#else
#define TEGRA_SYSTEM_DMA_CH_NR		32
#endif
#define TEGRA_SYSTEM_DMA_AVP_CH_NUM	4
#define TEGRA_SYSTEM_DMA_CH_MIN		0
#define TEGRA_SYSTEM_DMA_CH_MAX	\
	(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)

/* Maximum dma transfer size */
#define TEGRA_DMA_MAX_TRANSFER_SIZE	0x10000

static struct clk *dma_clk;

static const unsigned int ahb_addr_wrap_table[8] = {
	0, 32, 64, 128, 256, 512, 1024, 2048
};

static const unsigned int apb_addr_wrap_table[8] = {
	0, 1, 2, 4, 8, 16, 32, 64
};

static const unsigned int bus_width_table[5] = {
	8, 16, 32, 64, 128
};
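
/*
 * Worked example of how these tables are used (illustrative only):
 * tegra_dma_update_hw() below converts a request's wrap size from
 * bytes to words and then programs the table *index*, not the value,
 * into the register field:
 *
 *	req->source_wrap = 16;		16 bytes >> 2 = 4 words
 *	apb_addr_wrap_table[3] == 4	-> APB_SEQ wrap field = 3
 *
 *	req->source_bus_width = 32;	bus_width_table[2] == 32
 *					-> APB_SEQ bus width field = 2
 */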

static void __iomem *general_dma_addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
typedef void (*dma_isr_handler)(struct tegra_dma_channel *ch);

#define TEGRA_DMA_NAME_SIZE 16
struct tegra_dma_channel {
	struct list_head	list;
	int			id;
	spinlock_t		lock;
	char			name[TEGRA_DMA_NAME_SIZE];
	char			client_name[TEGRA_DMA_NAME_SIZE];
	void __iomem		*addr;
	int			mode;
	int			irq;
	dma_callback		callback;
	struct tegra_dma_req	*cb_req;
	dma_isr_handler		isr_handler;
};

#define NV_DMA_MAX_CHANNELS 32

static bool tegra_dma_initialized;
static DEFINE_MUTEX(tegra_dma_lock);
static DEFINE_SPINLOCK(enable_lock);

static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static bool tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void handle_oneshot_dma(struct tegra_dma_channel *ch);
static void handle_continuous_dbl_dma(struct tegra_dma_channel *ch);
static void handle_continuous_sngl_dma(struct tegra_dma_channel *ch);

void tegra_dma_flush(struct tegra_dma_channel *ch)
{
}
EXPORT_SYMBOL(tegra_dma_flush);

static void tegra_dma_stop(struct tegra_dma_channel *ch)
{
	u32 csr;
	u32 status;

	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_IE_EOC;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	csr &= ~CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
}

int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	while (!list_empty(&ch->list))
		list_del(ch->list.next);

	tegra_dma_stop(ch);

	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return 0;
}
EXPORT_SYMBOL(tegra_dma_cancel);

static void pause_dma(bool wait_for_burst_complete)
{
	spin_lock(&enable_lock);
	writel(0, general_dma_addr + APB_DMA_GEN);
	if (wait_for_burst_complete)
		udelay(20);
}

static void resume_dma(void)
{
	writel(GEN_ENABLE, general_dma_addr + APB_DMA_GEN);
	spin_unlock(&enable_lock);
}

static void start_head_req(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *head_req;
	if (!list_empty(&ch->list)) {
		head_req = list_entry(ch->list.next, typeof(*head_req), node);
		tegra_dma_update_hw(ch, head_req);
	}
}

static void configure_next_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *hreq)
{
	struct tegra_dma_req *next_req;
	if (!list_is_last(&hreq->node, &ch->list)) {
		next_req = list_entry(hreq->node.next, typeof(*next_req), node);
		tegra_dma_update_hw_partial(ch, next_req);
	}
}

static inline unsigned int get_req_xfer_word_count(
	struct tegra_dma_channel *ch, struct tegra_dma_req *req)
{
	if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
		return req->size >> 3;
	else
		return req->size >> 2;
}

static int get_current_xferred_count(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req, unsigned long status)
{
	int req_transfer_count;
	req_transfer_count = get_req_xfer_word_count(ch, req) << 2;
	return req_transfer_count - ((status & STA_COUNT_MASK) + 4);
}
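
/*
 * Worked example for the arithmetic above (illustrative only): for a
 * 4096-byte oneshot request, get_req_xfer_word_count() returns 1024
 * words, so req_transfer_count is 4096 bytes. STA_COUNT_MASK extracts
 * the remaining word count minus one, already shifted left by two, so
 * (status & STA_COUNT_MASK) + 4 is the remaining byte count. If the
 * field reads 0x7FC, then 2044 + 4 = 2048 bytes remain, and
 * 4096 - 2048 = 2048 bytes have been transferred.
 */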

static void tegra_dma_abort_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req, const char *warn_msg)
{
	unsigned long status = readl(ch->addr + APB_DMA_CHAN_STA);

	/*
	 * Check whether an interrupt is pending. This function is called
	 * from the ISR, so there is no need to run the ISR handler again;
	 * just update bytes_transferred.
	 */
	if (status & STA_ISE_EOC)
		req->bytes_transferred += get_req_xfer_word_count(ch, req) << 2;
	tegra_dma_stop(ch);

	req->bytes_transferred += get_current_xferred_count(ch, req, status);
	req->status = -TEGRA_DMA_REQ_ERROR_STOPPED;
	if (warn_msg)
		WARN(1, "%s\n", warn_msg);
	start_head_req(ch);
}

static void handle_continuous_head_request(struct tegra_dma_channel *ch,
	struct tegra_dma_req *last_req)
{
	struct tegra_dma_req *hreq = NULL;

	if (list_empty(&ch->list)) {
		tegra_dma_abort_req(ch, last_req, NULL);
		return;
	}

	/*
	 * The head request on the list should already be in flight.
	 * If it is not, the request was queued too late, so abort the
	 * DMA and start the next request immediately.
	 */
	hreq = list_entry(ch->list.next, typeof(*hreq), node);
	if (hreq->status != TEGRA_DMA_REQ_INFLIGHT) {
		tegra_dma_abort_req(ch, last_req, "Req was not queued on time");
		return;
	}

	/* Configure next request in single buffer mode */
	if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_SINGLE)
		configure_next_req(ch, hreq);
}

static unsigned int get_channel_status(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req, bool is_stop_dma)
{
	unsigned int status;

	if (is_stop_dma) {
		/*
		 * STOP the DMA and get the transfer count.
		 * Getting the transfer count is tricky:
		 *  - Globally disable DMA on all channels
		 *  - Read the channel's status register to learn the
		 *    number of pending bytes to be transferred
		 *  - Stop the DMA channel
		 *  - Globally re-enable DMA to resume other transfers
		 */
		pause_dma(true);
		status = readl(ch->addr + APB_DMA_CHAN_STA);
		tegra_dma_stop(ch);
		resume_dma();
		if (status & STA_ISE_EOC) {
			pr_err("Got DMA interrupt here, clearing\n");
			writel(status, ch->addr + APB_DMA_CHAN_STA);
		}
		req->status = TEGRA_DMA_REQ_ERROR_ABORTED;
	} else {
		status = readl(ch->addr + APB_DMA_CHAN_STA);
	}
	return status;
}

/* should be called with the channel lock held */
static unsigned int dma_active_count(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req, unsigned int status)
{
	unsigned int to_transfer;
	unsigned int req_transfer_count;

	unsigned int bytes_transferred;

	to_transfer = ((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1;
	req_transfer_count = get_req_xfer_word_count(ch, req);
	bytes_transferred = req_transfer_count;

	if (status & STA_BUSY)
		bytes_transferred -= to_transfer;

	/*
	 * In continuous transfer mode, the DMA only tracks the count for
	 * half of the DMA buffer. So if the DMA has already finished the
	 * first half, add the half-buffer count to the completed count.
	 */
	if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
			bytes_transferred += req_transfer_count;

	if (status & STA_ISE_EOC)
		bytes_transferred += req_transfer_count;

	bytes_transferred *= 4;

	return bytes_transferred;
}
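
/*
 * Worked example (illustrative only): for an 8192-byte request in
 * continuous-double mode, get_req_xfer_word_count() returns 1024
 * words (half the buffer). If the channel is busy with 256 words
 * still to transfer in the second half (buffer_status is HALF_FULL),
 * the result is (1024 - 256 + 1024) * 4 = 7168 bytes transferred.
 */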

int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	struct tegra_dma_req *req = NULL;
	int found = 0;
	unsigned int status;
	unsigned long irq_flags;
	int stop = 0;

	spin_lock_irqsave(&ch->lock, irq_flags);

	if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
		stop = 1;

	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	if (!found) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return -ENOENT;
	}

	if (!stop)
		goto skip_status;

	status = get_channel_status(ch, req, true);
	req->bytes_transferred = dma_active_count(ch, req, status);

	if (!list_empty(&ch->list)) {
		/* if the list is not empty, queue the next request */
		struct tegra_dma_req *next_req;
		next_req = list_entry(ch->list.next,
			typeof(*next_req), node);
		tegra_dma_update_hw(ch, next_req);
	}
skip_status:
	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	/* Callback should be called without any lock */
	req->complete(req);
	return 0;
}
EXPORT_SYMBOL(tegra_dma_dequeue_req);

bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
{
	unsigned long irq_flags;
	bool is_empty;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list))
		is_empty = true;
	else
		is_empty = false;
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return is_empty;
}
EXPORT_SYMBOL(tegra_dma_is_empty);

bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *req;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return false;
}
EXPORT_SYMBOL(tegra_dma_is_req_inflight);

int tegra_dma_get_transfer_count(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	unsigned int status;
	unsigned long irq_flags;
	int bytes_transferred = 0;

	if (IS_ERR_OR_NULL(ch))
		BUG();

	spin_lock_irqsave(&ch->lock, irq_flags);

	if (list_entry(ch->list.next, struct tegra_dma_req, node) != req) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		pr_debug("The dma request is not the head req\n");
		return req->bytes_transferred;
	}

	if (req->status != TEGRA_DMA_REQ_INFLIGHT) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		pr_debug("The dma request is not running\n");
		return req->bytes_transferred;
	}

	status = get_channel_status(ch, req, false);
	bytes_transferred = dma_active_count(ch, req, status);
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return bytes_transferred;
}
EXPORT_SYMBOL(tegra_dma_get_transfer_count);

int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *_req;
	int start_dma = 0;
	struct tegra_dma_req *hreq, *hnreq;

	if (req->size > TEGRA_DMA_MAX_TRANSFER_SIZE ||
	    req->source_addr & 0x3 || req->dest_addr & 0x3) {
		pr_err("Invalid DMA request for channel %d\n", ch->id);
		return -EINVAL;
	}

	if ((req->size & 0x3) ||
	    ((ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE) &&
	     (req->size & 0x7))) {
		pr_err("Invalid DMA request size 0x%08x for channel %d\n",
			req->size, ch->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	list_for_each_entry(_req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return -EEXIST;
		}
	}

	req->bytes_transferred = 0;
	req->status = 0;
	/* STATUS_EMPTY just means the DMA hasn't processed the buf yet. */
	req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_EMPTY;
	if (list_empty(&ch->list))
		start_dma = 1;

	list_add_tail(&req->node, &ch->list);

	if (start_dma) {
		tegra_dma_update_hw(ch, req);
	} else {
		/*
		 * Check to see if this request needs to be configured
		 * immediately in continuous mode.
		 */
		if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
			goto end;

		hreq = list_entry(ch->list.next, typeof(*hreq), node);
		hnreq = list_entry(hreq->node.next, typeof(*hnreq), node);
		if (hnreq != req)
			goto end;

		if ((ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE) &&
		    (req->buffer_status != TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL))
			goto end;

		/* Need to configure the new request now */
		tegra_dma_update_hw_partial(ch, req);
	}

end:
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return 0;
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);
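
/*
 * Minimal client sketch for the enqueue API (illustrative only, not
 * part of this driver). The FIFO address, buffer handle, and request
 * selector below are hypothetical; real values come from the client
 * hardware and its DMA mapping:
 *
 *	static void xfer_done(struct tegra_dma_req *req)
 *	{
 *		pr_info("dma done, %d bytes\n", req->bytes_transferred);
 *	}
 *
 *	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT,
 *					"example-%s", "client");
 *	req.to_memory = 1;
 *	req.source_addr = fifo_phys;		(hypothetical)
 *	req.source_wrap = 4;
 *	req.source_bus_width = 32;
 *	req.dest_addr = buf_phys;		(hypothetical)
 *	req.dest_wrap = 0;
 *	req.dest_bus_width = 32;
 *	req.req_sel = TEGRA_DMA_REQ_SEL_SPI;
 *	req.size = 4096;
 *	req.complete = xfer_done;
 *	tegra_dma_enqueue_req(ch, &req);
 */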

static void tegra_dma_dump_channel_usage(void)
{
	int i;
	pr_info("DMA channel allocation dump:\n");
	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];
		pr_warn("dma %d used by %s\n", i, ch->client_name);
	}
}

struct tegra_dma_channel *tegra_dma_allocate_channel(int mode,
	const char namefmt[], ...)
{
	int channel;
	struct tegra_dma_channel *ch = NULL;
	va_list args;
	dma_isr_handler isr_handler = NULL;

	if (WARN_ON(!tegra_dma_initialized))
		return NULL;

	mutex_lock(&tegra_dma_lock);

	/* first channel is the shared channel */
	if (mode & TEGRA_DMA_SHARED) {
		channel = TEGRA_SYSTEM_DMA_CH_MIN;
	} else {
		channel = find_first_zero_bit(channel_usage,
			ARRAY_SIZE(dma_channels));
		if (channel >= ARRAY_SIZE(dma_channels)) {
			tegra_dma_dump_channel_usage();
			goto out;
		}
	}

	if (mode & TEGRA_DMA_MODE_ONESHOT)
		isr_handler = handle_oneshot_dma;
	else if (mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
		isr_handler = handle_continuous_dbl_dma;
	else if (mode & TEGRA_DMA_MODE_CONTINUOUS_SINGLE)
		isr_handler = handle_continuous_sngl_dma;
	else
		pr_err("Bad channel mode for DMA ISR handler\n");

	if (!isr_handler)
		goto out;

	__set_bit(channel, channel_usage);
	ch = &dma_channels[channel];
	ch->mode = mode;
	ch->isr_handler = isr_handler;
	va_start(args, namefmt);
	vsnprintf(ch->client_name, sizeof(ch->client_name),
		namefmt, args);
	va_end(args);

out:
	mutex_unlock(&tegra_dma_lock);
	return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);
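
/*
 * Allocation note (illustrative only): the mode argument both selects
 * the ISR handler and, via TEGRA_DMA_SHARED, routes the caller to the
 * shared channel TEGRA_SYSTEM_DMA_CH_MIN. The name is printf-style,
 * e.g.:
 *
 *	ch = tegra_dma_allocate_channel(
 *		TEGRA_DMA_MODE_CONTINUOUS_SINGLE | TEGRA_DMA_SHARED,
 *		"uart.%d", 2);
 */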

void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
	if (ch->mode & TEGRA_DMA_SHARED)
		return;
	tegra_dma_cancel(ch);
	mutex_lock(&tegra_dma_lock);
	__clear_bit(ch->id, channel_usage);
	memset(ch->client_name, 0, sizeof(ch->client_name));
	ch->isr_handler = NULL;
	ch->callback = NULL;
	ch->cb_req = NULL;
	mutex_unlock(&tegra_dma_lock);
}
EXPORT_SYMBOL(tegra_dma_free_channel);

static bool tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	u32 apb_ptr;
	u32 ahb_ptr;
	u32 csr;
	unsigned long status;
	unsigned int req_transfer_count;
	bool configure = false;

	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;
	} else {
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;
	}

	/*
	 * The DMA controller loads a new configuration for the next
	 * transfer only after the last burst of the current transfer
	 * completes. If the EOC status is not set, the last burst has
	 * not yet completed, so it is safe to program the new
	 * configuration here. If EOC status is already set, the
	 * interrupt handler must load the new configuration after
	 * aborting the current transfer.
	 */
	pause_dma(false);
	status = readl(ch->addr + APB_DMA_CHAN_STA);

	/*
	 * If an interrupt is pending, do nothing: the ISR will handle
	 * the programming of the new request.
	 */
	if (status & STA_ISE_EOC) {
		pr_warn("%s(): Skipping new configuration as interrupt is pending\n",
			__func__);
		goto exit_config;
	}

	/* Safe to program new configuration */
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	req_transfer_count = get_req_xfer_word_count(ch, req);
	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_WCOUNT_MASK;
	csr |= (req_transfer_count - 1) << CSR_WCOUNT_SHIFT;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
	req->status = TEGRA_DMA_REQ_INFLIGHT;
	configure = true;

exit_config:
	resume_dma();
	return configure;
}

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	int ahb_addr_wrap;
	int apb_addr_wrap;
	int ahb_bus_width;
	int apb_bus_width;
	int index;
	unsigned int req_transfer_count;

	u32 ahb_seq;
	u32 apb_seq;
	u32 ahb_ptr;
	u32 apb_ptr;
	u32 csr;

	csr = CSR_IE_EOC | CSR_FLOW;
	ahb_seq = AHB_SEQ_INTR_ENB;

	switch (req->req_sel) {
	case TEGRA_DMA_REQ_SEL_SL2B1:
	case TEGRA_DMA_REQ_SEL_SL2B2:
	case TEGRA_DMA_REQ_SEL_SL2B3:
	case TEGRA_DMA_REQ_SEL_SL2B4:
#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
	case TEGRA_DMA_REQ_SEL_SL2B5:
	case TEGRA_DMA_REQ_SEL_SL2B6:
	case TEGRA_DMA_REQ_SEL_APBIF_CH0:
	case TEGRA_DMA_REQ_SEL_APBIF_CH1:
	case TEGRA_DMA_REQ_SEL_APBIF_CH2:
	case TEGRA_DMA_REQ_SEL_APBIF_CH3:
#endif
	case TEGRA_DMA_REQ_SEL_SPI:
		/* The dtv interface has a fixed burst size of 4 words */
		if (req->fixed_burst_size) {
			ahb_seq |= AHB_SEQ_BURST_4;
			break;
		}
		/*
		 * For spi/slink the burst size is based on the transfer
		 * size: a multiple of 32 bytes gives a burst of 8 words
		 * (8x32 bits), a multiple of 16 bytes gives a burst of
		 * 4 words (4x32 bits), and anything else gives a burst
		 * of 1 word (1x32 bits).
		 */
		if (req->size & 0xF)
			ahb_seq |= AHB_SEQ_BURST_1;
		else if ((req->size >> 4) & 0x1)
			ahb_seq |= AHB_SEQ_BURST_4;
		else
			ahb_seq |= AHB_SEQ_BURST_8;
		break;
#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
	case TEGRA_DMA_REQ_SEL_I2S_2:
	case TEGRA_DMA_REQ_SEL_I2S_1:
	case TEGRA_DMA_REQ_SEL_SPD_I:
	case TEGRA_DMA_REQ_SEL_UI_I:
	case TEGRA_DMA_REQ_SEL_I2S2_2:
	case TEGRA_DMA_REQ_SEL_I2S2_1:
		/* For ARCH_2x the i2s/spdif burst size is 4 words */
		ahb_seq |= AHB_SEQ_BURST_4;
		break;
#endif

	default:
		ahb_seq |= AHB_SEQ_BURST_1;
		break;
	}

	apb_seq = 0;

	csr |= req->req_sel << CSR_REQ_SEL_SHIFT;

	req_transfer_count = get_req_xfer_word_count(ch, req);

	/* One shot mode is always single buffered. Continuous mode could
	 * support either.
	 */
	if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
		csr |= CSR_ONCE;

	if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
		ahb_seq |= AHB_SEQ_DBL_BUF;

	csr |= (req_transfer_count - 1) << CSR_WCOUNT_SHIFT;

	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;

		apb_addr_wrap = req->source_wrap;
		ahb_addr_wrap = req->dest_wrap;
		apb_bus_width = req->source_bus_width;
		ahb_bus_width = req->dest_bus_width;

	} else {
		csr |= CSR_DIR;
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;

		apb_addr_wrap = req->dest_wrap;
		ahb_addr_wrap = req->source_wrap;
		apb_bus_width = req->dest_bus_width;
		ahb_bus_width = req->source_bus_width;
	}

	apb_addr_wrap >>= 2;
	ahb_addr_wrap >>= 2;

	/* set address wrap for APB size */
	index = 0;
	do {
		if (apb_addr_wrap_table[index] == apb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
	apb_seq |= index << APB_SEQ_WRAP_SHIFT;

	/* set address wrap for AHB size */
	index = 0;
	do {
		if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
	ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == ahb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == apb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;

	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
	writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}

static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	req = list_entry(ch->list.next, typeof(*req), node);
	list_del(&req->node);
	req->bytes_transferred += req->size;
	req->status = TEGRA_DMA_REQ_SUCCESS;

	ch->callback = req->complete;
	ch->cb_req = req;

	start_head_req(ch);
}

static void handle_continuous_dbl_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	req = list_entry(ch->list.next, typeof(*req), node);

	if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
		bool is_dma_ping_complete;
		unsigned long status = readl(ch->addr + APB_DMA_CHAN_STA);
		is_dma_ping_complete = (status & STA_PING_PONG) ? true : false;

		/* The ping-pong status reads inverted for memory writes */
		if (req->to_memory)
			is_dma_ping_complete = !is_dma_ping_complete;

		/* Out of sync - release the current buffer */
		if (!is_dma_ping_complete) {
			/*
			 * We should not land here if the queueing and
			 * system latency are properly configured.
			 */
			req->bytes_transferred += req->size;

			list_del(&req->node);
			ch->callback = req->complete;
			ch->cb_req = req;

			tegra_dma_abort_req(ch, req,
				"Dma is out of sync for ping-pong buffer");
			return;
		}

		/*
		 * Configure the next request so that after the full buffer
		 * transfer it can start without software intervention.
		 */
		configure_next_req(ch, req);

		req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
		req->status = TEGRA_DMA_REQ_SUCCESS;
		req->bytes_transferred += req->size >> 1;

		ch->callback = req->threshold;
		ch->cb_req = req;
		return;
	}

	if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
		/* Interrupt for full buffer complete */
		req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
		req->bytes_transferred += req->size >> 1;
		req->status = TEGRA_DMA_REQ_SUCCESS;

		list_del(&req->node);
		ch->callback = req->complete;
		ch->cb_req = req;

		handle_continuous_head_request(ch, req);
		return;
	}
	tegra_dma_abort_req(ch, req, "Dma status is out of sync\n");
	/* The DMA should have been stopped much earlier */
	BUG();
}
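
/*
 * Client pattern for the double-buffer mode above (illustrative
 * only): req->threshold is invoked when the first half of the buffer
 * has been transferred and req->complete when the whole buffer is
 * done, so a client can keep the stream gapless by queueing the next
 * request from the threshold callback (hypothetical names):
 *
 *	static void half_done(struct tegra_dma_req *req)
 *	{
 *		tegra_dma_enqueue_req(audio_ch, &next_audio_req);
 *	}
 */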

static void handle_continuous_sngl_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_FULL) {
		tegra_dma_stop(ch);
		pr_err("%s: DMA complete irq without corresponding req\n",
			__func__);
		WARN_ON(1);
		return;
	}

	/* Handle the case when buffer is completely full */
	req->bytes_transferred += req->size;
	req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
	req->status = TEGRA_DMA_REQ_SUCCESS;

	list_del(&req->node);
	ch->callback = req->complete;
	ch->cb_req = req;

	handle_continuous_head_request(ch, req);
}

static void handle_dma_isr_locked(struct tegra_dma_channel *ch)
{
	/* There should be a proper isr handler */
	BUG_ON(!ch->isr_handler);

	if (list_empty(&ch->list)) {
		tegra_dma_stop(ch);
		pr_err("%s: No requests in the list.\n", __func__);
		WARN_ON(1);
		return;
	}

	ch->isr_handler(ch);
}

static irqreturn_t dma_isr(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;
	unsigned long irq_flags;
	unsigned long status;
	dma_callback callback = NULL;
	struct tegra_dma_req *cb_req = NULL;

	spin_lock_irqsave(&ch->lock, irq_flags);

	/*
	 * Callbacks should be set and cleared while holding the spinlock,
	 * never left set.
	 */
	if (ch->callback || ch->cb_req)
		pr_err("%s(): Channel %d callbacks are not initialized properly\n",
			__func__, ch->id);
	BUG_ON(ch->callback || ch->cb_req);

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC) {
		/* Clear dma int status */
		writel(status, ch->addr + APB_DMA_CHAN_STA);
		handle_dma_isr_locked(ch);
		callback = ch->callback;
		cb_req = ch->cb_req;
		ch->callback = NULL;
		ch->cb_req = NULL;
	} else {
		pr_info("Interrupt is already handled %d\n", ch->id);
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	/* Call the callback to notify the client, if one is set */
	if (callback)
		callback(cb_req);
	return IRQ_HANDLED;
}

int __init tegra_dma_init(void)
{
	int ret = 0;
	int i;
	unsigned int irq;

	bitmap_fill(channel_usage, NV_DMA_MAX_CHANNELS);

	dma_clk = clk_get_sys("tegra-dma", NULL);
	if (IS_ERR_OR_NULL(dma_clk)) {
		pr_err("Unable to get clock for APB DMA\n");
		ret = PTR_ERR(dma_clk);
		goto fail;
	}
	ret = clk_enable(dma_clk);
	if (ret != 0) {
		pr_err("Unable to enable clock for APB DMA\n");
		goto fail;
	}

	/*
	 * Resetting all dma channels to make sure all channels are in init
	 * state.
	 */
	tegra_periph_reset_assert(dma_clk);
	udelay(10);
	tegra_periph_reset_deassert(dma_clk);
	udelay(10);

	writel(GEN_ENABLE, general_dma_addr + APB_DMA_GEN);
	writel(0, general_dma_addr + APB_DMA_CNTRL);
	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
		general_dma_addr + APB_DMA_IRQ_MASK_SET);

	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];

		ch->id = i;
		ch->isr_handler = NULL;
		snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);

		memset(ch->client_name, 0, sizeof(ch->client_name));

		ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->list);

#ifndef CONFIG_ARCH_TEGRA_2x_SOC
		if (i >= 16)
			irq = INT_APB_DMA_CH16 + i - 16;
		else
#endif
			irq = INT_APB_DMA_CH0 + i;
		ret = request_irq(irq, dma_isr, 0, dma_channels[i].name, ch);
		if (ret) {
			pr_err("Failed to register IRQ %d for DMA %d\n",
				irq, i);
			goto fail;
		}
		ch->irq = irq;

		__clear_bit(i, channel_usage);
	}
	/* mark the shared channel allocated */
	__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);

	tegra_dma_initialized = true;

	return 0;
fail:
	writel(0, general_dma_addr + APB_DMA_GEN);
	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];
		if (ch->irq)
			free_irq(ch->irq, ch);
	}
	return ret;
}
postcore_initcall(tegra_dma_init);

#ifdef CONFIG_PM_SLEEP

static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];

static int tegra_dma_suspend(void)
{
	u32 *ctx = apb_dma;
	int i;

	*ctx++ = readl(general_dma_addr + APB_DMA_GEN);
	*ctx++ = readl(general_dma_addr + APB_DMA_CNTRL);
	*ctx++ = readl(general_dma_addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		*ctx++ = readl(addr + APB_DMA_CHAN_CSR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
	}

	/* Disable the DMA clock. */
	clk_disable(dma_clk);
	return 0;
}

static void tegra_dma_resume(void)
{
	u32 *ctx = apb_dma;
	int i;

	/* Enable the DMA clock. */
	clk_enable(dma_clk);

	writel(*ctx++, general_dma_addr + APB_DMA_GEN);
	writel(*ctx++, general_dma_addr + APB_DMA_CNTRL);
	writel(*ctx++, general_dma_addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		writel(*ctx++, addr + APB_DMA_CHAN_CSR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
	}
}

static struct syscore_ops tegra_dma_syscore_ops = {
	.suspend = tegra_dma_suspend,
	.resume = tegra_dma_resume,
};

static int tegra_dma_syscore_init(void)
{
	register_syscore_ops(&tegra_dma_syscore_ops);

	return 0;
}
subsys_initcall(tegra_dma_syscore_init);
#endif

#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int dbg_dma_show(struct seq_file *s, void *unused)
{
	int i;

	seq_printf(s, "    APBDMA global register\n");
	seq_printf(s, "DMA_GEN:   0x%08x\n",
		__raw_readl(general_dma_addr + APB_DMA_GEN));
	seq_printf(s, "DMA_CNTRL: 0x%08x\n",
		__raw_readl(general_dma_addr + APB_DMA_CNTRL));
	seq_printf(s, "IRQ_MASK:  0x%08x\n",
		__raw_readl(general_dma_addr + APB_DMA_IRQ_MASK));

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		seq_printf(s, "    APBDMA channel %02d register\n", i);
		seq_printf(s, "0x00: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			__raw_readl(addr + 0x0),
			__raw_readl(addr + 0x4),
			__raw_readl(addr + 0x8),
			__raw_readl(addr + 0xC));
		seq_printf(s, "0x10: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			__raw_readl(addr + 0x10),
			__raw_readl(addr + 0x14),
			__raw_readl(addr + 0x18),
			__raw_readl(addr + 0x1C));
	}
	seq_printf(s, "\nAPB DMA users\n");
	seq_printf(s, "-------------\n");
	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];
		if (strlen(ch->client_name) > 0)
			seq_printf(s, "dma %d -> %s\n", i, ch->client_name);
	}
	return 0;
}

static int dbg_dma_open(struct inode *inode, struct file *file)
{
	return single_open(file, dbg_dma_show, &inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= dbg_dma_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init tegra_dma_debuginit(void)
{
	(void) debugfs_create_file("tegra_dma", S_IRUGO,
		NULL, NULL, &debug_fops);
	return 0;
}
late_initcall(tegra_dma_debuginit);
#endif