author		Colin Cross <ccross@android.com>	2010-04-05 16:16:42 -0400
committer	Colin Cross <ccross@android.com>	2010-10-21 21:12:35 -0400
commit		4de3a8fa334851e642d4889d6afa6e5d3daea10a (patch)
tree		955a73509a5745ee4e7a71fa42f10c0c57632aa5 /arch/arm/mach-tegra
parent		7056d423f16103f6700569f60ca842d91bfaabab (diff)
[ARM] tegra: Add APB DMA support
The APB DMA block handles DMA transfers to and from some peripherals in the Tegra SoC. It reads from sequential addresses on the memory bus and writes repeatedly to the same address on the APB bus.

Two transfer modes are supported: oneshot, for transferring a known size to or from a peripheral, and continuous, for streaming data. In continuous mode, a callback occurs when the buffer is half full so that the existing data can be handled and a new request queued.

v2 changes: the DMA API no longer uses PTR_ERR

Signed-off-by: Erik Gilling <konkers@android.com>
Signed-off-by: Colin Cross <ccross@android.com>
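A minimal usage sketch of the continuous-mode API (illustration only, not part of this patch), assuming a hypothetical client driver streaming from a peripheral FIFO into memory; the request selector, physical addresses, and buffer size are placeholders:

#include <mach/dma.h>

static void rx_threshold(struct tegra_dma_req *req)
{
	/* First half of the double buffer is full; drain it while the DMA
	 * engine keeps filling the second half. */
}

static void rx_complete(struct tegra_dma_req *req)
{
	/* Whole buffer is full; the req has been removed from the channel
	 * queue and req->bytes_transferred is valid. */
}

static int start_rx(unsigned long fifo_phys, unsigned long buf_phys,
		    unsigned int len)
{
	static struct tegra_dma_req req;
	struct tegra_dma_channel *ch;

	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_CONTINOUS);
	if (!ch)
		return -EBUSY;

	req.to_memory = 1;			/* peripheral FIFO -> memory */
	req.source_addr = fifo_phys;		/* APB-side FIFO register */
	req.dest_addr = buf_phys;		/* AHB-side buffer, word aligned */
	req.source_wrap = 4;			/* re-read the same FIFO word */
	req.dest_wrap = 0;			/* linear writes to memory */
	req.source_bus_width = 32;
	req.dest_bus_width = 32;
	req.req_sel = TEGRA_DMA_REQ_SEL_I2S_1;	/* placeholder selector */
	req.size = len;				/* bytes, multiple of 8 */
	req.threshold = rx_threshold;		/* called at the half-full point */
	req.complete = rx_complete;		/* called when the buffer is full */

	return tegra_dma_enqueue_req(ch, &req);
}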
Diffstat (limited to 'arch/arm/mach-tegra')
-rw-r--r--	arch/arm/mach-tegra/Kconfig			|   7
-rw-r--r--	arch/arm/mach-tegra/Makefile			|   1
-rw-r--r--	arch/arm/mach-tegra/common.c			|   7
-rw-r--r--	arch/arm/mach-tegra/dma.c			| 752
-rw-r--r--	arch/arm/mach-tegra/include/mach/dma.h		| 155
5 files changed, 922 insertions, 0 deletions
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index a57713c1954a..8e32d9d7ec8d 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -47,4 +47,11 @@ config TEGRA_DEBUG_UARTE
 
 endchoice
 
+config TEGRA_SYSTEM_DMA
+	bool "Enable system DMA driver for NVIDIA Tegra SoCs"
+	default y
+	help
+	  Adds system DMA functionality for NVIDIA Tegra SoCs, used by
+	  several Tegra device drivers
+
 endif
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index 2a3762179ccf..fc069a9e0f6b 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_dvfs.o
 obj-$(CONFIG_ARCH_TEGRA_2x_SOC)	+= pinmux-t2-tables.o
 obj-$(CONFIG_SMP)			+= platsmp.o localtimer.o headsmp.o
 obj-$(CONFIG_HOTPLUG_CPU)		+= hotplug.o
+obj-$(CONFIG_TEGRA_SYSTEM_DMA)		+= dma.o
 obj-$(CONFIG_CPU_FREQ)			+= cpu-tegra.o
 
 obj-${CONFIG_MACH_HARMONY}		+= board-harmony.o
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index 445104a993ba..7c91e2b9d643 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -19,10 +19,13 @@
 
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
 
 #include <asm/hardware/cache-l2x0.h>
 
 #include <mach/iomap.h>
+#include <mach/dma.h>
 
 #include "board.h"
 #include "clock.h"
@@ -52,6 +55,7 @@ void __init tegra_init_cache(void)
 
 	l2x0_init(p, 0x6C080001, 0x8200c3fe);
 #endif
+
 }
 
 void __init tegra_common_init(void)
@@ -60,4 +64,7 @@ void __init tegra_common_init(void)
 	tegra_init_clock();
 	tegra_clk_init_from_table(common_clk_init_table);
 	tegra_init_cache();
+#ifdef CONFIG_TEGRA_SYSTEM_DMA
+	tegra_dma_init();
+#endif
 }
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
new file mode 100644
index 000000000000..edda6ec5e925
--- /dev/null
+++ b/arch/arm/mach-tegra/dma.c
@@ -0,0 +1,752 @@
1/*
2 * arch/arm/mach-tegra/dma.c
3 *
4 * System DMA driver for NVIDIA Tegra SoCs
5 *
6 * Copyright (c) 2008-2009, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
21 */
22
23#include <linux/io.h>
24#include <linux/interrupt.h>
25#include <linux/module.h>
26#include <linux/spinlock.h>
27#include <linux/err.h>
28#include <linux/irq.h>
29#include <linux/delay.h>
30#include <mach/dma.h>
31#include <mach/irqs.h>
32#include <mach/iomap.h>
33
34#define APB_DMA_GEN 0x000
35#define GEN_ENABLE (1<<31)
36
37#define APB_DMA_CNTRL 0x010
38
39#define APB_DMA_IRQ_MASK 0x01c
40
41#define APB_DMA_IRQ_MASK_SET 0x020
42
43#define APB_DMA_CHAN_CSR 0x000
44#define CSR_ENB (1<<31)
45#define CSR_IE_EOC (1<<30)
46#define CSR_HOLD (1<<29)
47#define CSR_DIR (1<<28)
48#define CSR_ONCE (1<<27)
49#define CSR_FLOW (1<<21)
50#define CSR_REQ_SEL_SHIFT 16
51#define CSR_REQ_SEL_MASK (0x1F<<CSR_REQ_SEL_SHIFT)
52#define CSR_REQ_SEL_INVALID (31<<CSR_REQ_SEL_SHIFT)
53#define CSR_WCOUNT_SHIFT 2
54#define CSR_WCOUNT_MASK 0xFFFC
55
56#define APB_DMA_CHAN_STA 0x004
57#define STA_BUSY (1<<31)
58#define STA_ISE_EOC (1<<30)
59#define STA_HALT (1<<29)
60#define STA_PING_PONG (1<<28)
61#define STA_COUNT_SHIFT 2
62#define STA_COUNT_MASK 0xFFFC
63
64#define APB_DMA_CHAN_AHB_PTR 0x010
65
66#define APB_DMA_CHAN_AHB_SEQ 0x014
67#define AHB_SEQ_INTR_ENB (1<<31)
68#define AHB_SEQ_BUS_WIDTH_SHIFT 28
69#define AHB_SEQ_BUS_WIDTH_MASK (0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
70#define AHB_SEQ_BUS_WIDTH_8 (0<<AHB_SEQ_BUS_WIDTH_SHIFT)
71#define AHB_SEQ_BUS_WIDTH_16 (1<<AHB_SEQ_BUS_WIDTH_SHIFT)
72#define AHB_SEQ_BUS_WIDTH_32 (2<<AHB_SEQ_BUS_WIDTH_SHIFT)
73#define AHB_SEQ_BUS_WIDTH_64 (3<<AHB_SEQ_BUS_WIDTH_SHIFT)
74#define AHB_SEQ_BUS_WIDTH_128 (4<<AHB_SEQ_BUS_WIDTH_SHIFT)
75#define AHB_SEQ_DATA_SWAP (1<<27)
76#define AHB_SEQ_BURST_MASK (0x7<<24)
77#define AHB_SEQ_BURST_1 (4<<24)
78#define AHB_SEQ_BURST_4 (5<<24)
79#define AHB_SEQ_BURST_8 (6<<24)
80#define AHB_SEQ_DBL_BUF (1<<19)
81#define AHB_SEQ_WRAP_SHIFT 16
82#define AHB_SEQ_WRAP_MASK (0x7<<AHB_SEQ_WRAP_SHIFT)
83
84#define APB_DMA_CHAN_APB_PTR 0x018
85
86#define APB_DMA_CHAN_APB_SEQ 0x01c
87#define APB_SEQ_BUS_WIDTH_SHIFT 28
88#define APB_SEQ_BUS_WIDTH_MASK (0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
89#define APB_SEQ_BUS_WIDTH_8 (0<<APB_SEQ_BUS_WIDTH_SHIFT)
90#define APB_SEQ_BUS_WIDTH_16 (1<<APB_SEQ_BUS_WIDTH_SHIFT)
91#define APB_SEQ_BUS_WIDTH_32 (2<<APB_SEQ_BUS_WIDTH_SHIFT)
92#define APB_SEQ_BUS_WIDTH_64 (3<<APB_SEQ_BUS_WIDTH_SHIFT)
93#define APB_SEQ_BUS_WIDTH_128 (4<<APB_SEQ_BUS_WIDTH_SHIFT)
94#define APB_SEQ_DATA_SWAP (1<<27)
95#define APB_SEQ_WRAP_SHIFT 16
96#define APB_SEQ_WRAP_MASK (0x7<<APB_SEQ_WRAP_SHIFT)
97
98#define TEGRA_SYSTEM_DMA_CH_NR 16
99#define TEGRA_SYSTEM_DMA_AVP_CH_NUM 4
100#define TEGRA_SYSTEM_DMA_CH_MIN 0
101#define TEGRA_SYSTEM_DMA_CH_MAX \
102 (TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)
103
104#define NV_DMA_MAX_TRASFER_SIZE 0x10000
105
106const unsigned int ahb_addr_wrap_table[8] = {
107 0, 32, 64, 128, 256, 512, 1024, 2048
108};
109
110const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64};
111
112const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128};
113
114#define TEGRA_DMA_NAME_SIZE 16
115struct tegra_dma_channel {
116 struct list_head list;
117 int id;
118 spinlock_t lock;
119 char name[TEGRA_DMA_NAME_SIZE];
120 void __iomem *addr;
121 int mode;
122 int irq;
123
124 /* Register shadow */
125 u32 csr;
126 u32 ahb_seq;
127 u32 ahb_ptr;
128 u32 apb_seq;
129 u32 apb_ptr;
130};
131
132#define NV_DMA_MAX_CHANNELS 32
133
134static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
135static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
136
137static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
138 struct tegra_dma_req *req);
139static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
140 struct tegra_dma_req *req);
141static void tegra_dma_init_hw(struct tegra_dma_channel *ch);
142static void tegra_dma_stop(struct tegra_dma_channel *ch);
143
144void tegra_dma_flush(struct tegra_dma_channel *ch)
145{
146}
147EXPORT_SYMBOL(tegra_dma_flush);
148
149void tegra_dma_dequeue(struct tegra_dma_channel *ch)
150{
151 struct tegra_dma_req *req;
152
153 req = list_entry(ch->list.next, typeof(*req), node);
154
155 tegra_dma_dequeue_req(ch, req);
156 return;
157}
158
159void tegra_dma_stop(struct tegra_dma_channel *ch)
160{
161 unsigned int csr;
162 unsigned int status;
163
164 csr = ch->csr;
165 csr &= ~CSR_IE_EOC;
166 writel(csr, ch->addr + APB_DMA_CHAN_CSR);
167
168 csr &= ~CSR_ENB;
169 writel(csr, ch->addr + APB_DMA_CHAN_CSR);
170
171 status = readl(ch->addr + APB_DMA_CHAN_STA);
172 if (status & STA_ISE_EOC)
173 writel(status, ch->addr + APB_DMA_CHAN_STA);
174}
175
176int tegra_dma_cancel(struct tegra_dma_channel *ch)
177{
178 unsigned int csr;
179 unsigned long irq_flags;
180
181 spin_lock_irqsave(&ch->lock, irq_flags);
182 while (!list_empty(&ch->list))
183 list_del(ch->list.next);
184
185 csr = ch->csr;
186 csr &= ~CSR_REQ_SEL_MASK;
187 csr |= CSR_REQ_SEL_INVALID;
188
189 /* Set the enable as that is not shadowed */
190 csr |= CSR_ENB;
191 writel(csr, ch->addr + APB_DMA_CHAN_CSR);
192
193 tegra_dma_stop(ch);
194
195 spin_unlock_irqrestore(&ch->lock, irq_flags);
196 return 0;
197}
198
199int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
200 struct tegra_dma_req *_req)
201{
202 unsigned int csr;
203 unsigned int status;
204 struct tegra_dma_req *req = NULL;
205 int found = 0;
206 unsigned long irq_flags;
207 int to_transfer;
208 int req_transfer_count;
209
210 spin_lock_irqsave(&ch->lock, irq_flags);
211 list_for_each_entry(req, &ch->list, node) {
212 if (req == _req) {
213 list_del(&req->node);
214 found = 1;
215 break;
216 }
217 }
218 if (!found) {
219 spin_unlock_irqrestore(&ch->lock, irq_flags);
220 return 0;
221 }
222
223 /* STOP the DMA and get the transfer count.
224 * Getting the transfer count is tricky.
225 * - Change the source selector to invalid to stop the DMA from
226 * FIFO to memory.
227 * - Read the status register to know the number of pending
228	 * bytes to be transferred.
229 * - Finally stop or program the DMA to the next buffer in the
230 * list.
231 */
232 csr = ch->csr;
233 csr &= ~CSR_REQ_SEL_MASK;
234 csr |= CSR_REQ_SEL_INVALID;
235
236 /* Set the enable as that is not shadowed */
237 csr |= CSR_ENB;
238 writel(csr, ch->addr + APB_DMA_CHAN_CSR);
239
240 /* Get the transfer count */
241 status = readl(ch->addr + APB_DMA_CHAN_STA);
242 to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
243 req_transfer_count = (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
244 req_transfer_count += 1;
245 to_transfer += 1;
246
247 req->bytes_transferred = req_transfer_count;
248
249 if (status & STA_BUSY)
250 req->bytes_transferred -= to_transfer;
251
252	/* In continuous transfer mode, DMA only tracks the count of the
253	 * half DMA buffer. So, if the DMA already finished half the DMA
254	 * then add the half buffer to the completed count.
255	 *
256	 * FIXME: There can be a race here. What if the req to
257	 * dequeue happens at the same time as the DMA just moved to
258	 * the new buffer and SW hasn't yet received the interrupt?
259 */
260 if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
261 if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
262 req->bytes_transferred += req_transfer_count;
263
264 req->bytes_transferred *= 4;
265
266 tegra_dma_stop(ch);
267 if (!list_empty(&ch->list)) {
268 /* if the list is not empty, queue the next request */
269 struct tegra_dma_req *next_req;
270 next_req = list_entry(ch->list.next,
271 typeof(*next_req), node);
272 tegra_dma_update_hw(ch, next_req);
273 }
274 req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
275
276 spin_unlock_irqrestore(&ch->lock, irq_flags);
277
278 /* Callback should be called without any lock */
279 req->complete(req);
280 return 0;
281}
282EXPORT_SYMBOL(tegra_dma_dequeue_req);
283
284bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
285{
286 unsigned long irq_flags;
287 bool is_empty;
288
289 spin_lock_irqsave(&ch->lock, irq_flags);
290 if (list_empty(&ch->list))
291 is_empty = true;
292 else
293 is_empty = false;
294 spin_unlock_irqrestore(&ch->lock, irq_flags);
295 return is_empty;
296}
297EXPORT_SYMBOL(tegra_dma_is_empty);
298
299bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
300 struct tegra_dma_req *_req)
301{
302 unsigned long irq_flags;
303 struct tegra_dma_req *req;
304
305 spin_lock_irqsave(&ch->lock, irq_flags);
306 list_for_each_entry(req, &ch->list, node) {
307 if (req == _req) {
308 spin_unlock_irqrestore(&ch->lock, irq_flags);
309 return true;
310 }
311 }
312 spin_unlock_irqrestore(&ch->lock, irq_flags);
313 return false;
314}
315EXPORT_SYMBOL(tegra_dma_is_req_inflight);
316
317int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
318 struct tegra_dma_req *req)
319{
320 unsigned long irq_flags;
321 int start_dma = 0;
322
323 if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
324 req->source_addr & 0x3 || req->dest_addr & 0x3) {
325 pr_err("Invalid DMA request for channel %d\n", ch->id);
326 return -EINVAL;
327 }
328
329 spin_lock_irqsave(&ch->lock, irq_flags);
330
331 req->bytes_transferred = 0;
332 req->status = 0;
333 req->buffer_status = 0;
334 if (list_empty(&ch->list))
335 start_dma = 1;
336
337 list_add_tail(&req->node, &ch->list);
338
339 if (start_dma)
340 tegra_dma_update_hw(ch, req);
341
342 spin_unlock_irqrestore(&ch->lock, irq_flags);
343
344 return 0;
345}
346EXPORT_SYMBOL(tegra_dma_enqueue_req);
347
348struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
349{
350 int channel;
351 struct tegra_dma_channel *ch;
352
353 /* first channel is the shared channel */
354 if (mode & TEGRA_DMA_SHARED) {
355 channel = TEGRA_SYSTEM_DMA_CH_MIN;
356 } else {
357 channel = find_first_zero_bit(channel_usage,
358 ARRAY_SIZE(dma_channels));
359 if (channel >= ARRAY_SIZE(dma_channels))
360 return NULL;
361 }
362 __set_bit(channel, channel_usage);
363 ch = &dma_channels[channel];
364 ch->mode = mode;
365 return ch;
366}
367EXPORT_SYMBOL(tegra_dma_allocate_channel);
368
369void tegra_dma_free_channel(struct tegra_dma_channel *ch)
370{
371 if (ch->mode & TEGRA_DMA_SHARED)
372 return;
373 tegra_dma_cancel(ch);
374 __clear_bit(ch->id, channel_usage);
375}
376EXPORT_SYMBOL(tegra_dma_free_channel);
377
378static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
379 struct tegra_dma_req *req)
380{
381 if (req->to_memory) {
382 ch->apb_ptr = req->source_addr;
383 ch->ahb_ptr = req->dest_addr;
384 } else {
385 ch->apb_ptr = req->dest_addr;
386 ch->ahb_ptr = req->source_addr;
387 }
388 writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
389 writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
390
391 req->status = TEGRA_DMA_REQ_INFLIGHT;
392 return;
393}
394
395static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
396 struct tegra_dma_req *req)
397{
398 int ahb_addr_wrap;
399 int apb_addr_wrap;
400 int ahb_bus_width;
401 int apb_bus_width;
402 int index;
403 unsigned long csr;
404
405
406 ch->csr |= CSR_FLOW;
407 ch->csr &= ~CSR_REQ_SEL_MASK;
408 ch->csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
409 ch->ahb_seq &= ~AHB_SEQ_BURST_MASK;
410 ch->ahb_seq |= AHB_SEQ_BURST_1;
411
412 /* One shot mode is always single buffered,
413 * continuous mode is always double buffered
414 * */
415 if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
416 ch->csr |= CSR_ONCE;
417 ch->ahb_seq &= ~AHB_SEQ_DBL_BUF;
418 ch->csr &= ~CSR_WCOUNT_MASK;
419 ch->csr |= ((req->size>>2) - 1) << CSR_WCOUNT_SHIFT;
420 } else {
421 ch->csr &= ~CSR_ONCE;
422 ch->ahb_seq |= AHB_SEQ_DBL_BUF;
423
424 /* In double buffered mode, we set the size to half the
425 * requested size and interrupt when half the buffer
426 * is full */
427 ch->csr &= ~CSR_WCOUNT_MASK;
428 ch->csr |= ((req->size>>3) - 1) << CSR_WCOUNT_SHIFT;
429 }
430
431 if (req->to_memory) {
432 ch->csr &= ~CSR_DIR;
433 ch->apb_ptr = req->source_addr;
434 ch->ahb_ptr = req->dest_addr;
435
436 apb_addr_wrap = req->source_wrap;
437 ahb_addr_wrap = req->dest_wrap;
438 apb_bus_width = req->source_bus_width;
439 ahb_bus_width = req->dest_bus_width;
440
441 } else {
442 ch->csr |= CSR_DIR;
443 ch->apb_ptr = req->dest_addr;
444 ch->ahb_ptr = req->source_addr;
445
446 apb_addr_wrap = req->dest_wrap;
447 ahb_addr_wrap = req->source_wrap;
448 apb_bus_width = req->dest_bus_width;
449 ahb_bus_width = req->source_bus_width;
450 }
451
452 apb_addr_wrap >>= 2;
453 ahb_addr_wrap >>= 2;
454
455 /* set address wrap for APB size */
456 index = 0;
457 do {
458 if (apb_addr_wrap_table[index] == apb_addr_wrap)
459 break;
460 index++;
461 } while (index < ARRAY_SIZE(apb_addr_wrap_table));
462 BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
463 ch->apb_seq &= ~APB_SEQ_WRAP_MASK;
464 ch->apb_seq |= index << APB_SEQ_WRAP_SHIFT;
465
466 /* set address wrap for AHB size */
467 index = 0;
468 do {
469 if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
470 break;
471 index++;
472 } while (index < ARRAY_SIZE(ahb_addr_wrap_table));
473 BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
474 ch->ahb_seq &= ~AHB_SEQ_WRAP_MASK;
475 ch->ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;
476
477 for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
478 if (bus_width_table[index] == ahb_bus_width)
479 break;
480 }
481 BUG_ON(index == ARRAY_SIZE(bus_width_table));
482 ch->ahb_seq &= ~AHB_SEQ_BUS_WIDTH_MASK;
483 ch->ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;
484
485 for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
486 if (bus_width_table[index] == apb_bus_width)
487 break;
488 }
489 BUG_ON(index == ARRAY_SIZE(bus_width_table));
490 ch->apb_seq &= ~APB_SEQ_BUS_WIDTH_MASK;
491 ch->apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;
492
493 ch->csr |= CSR_IE_EOC;
494
495 /* update hw registers with the shadow */
496 writel(ch->csr, ch->addr + APB_DMA_CHAN_CSR);
497 writel(ch->apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
498 writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
499 writel(ch->ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
500 writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
501
502 csr = ch->csr | CSR_ENB;
503 writel(csr, ch->addr + APB_DMA_CHAN_CSR);
504
505 req->status = TEGRA_DMA_REQ_INFLIGHT;
506}
507
508static void tegra_dma_init_hw(struct tegra_dma_channel *ch)
509{
510 /* One shot with an interrupt to CPU after transfer */
511 ch->csr = CSR_ONCE | CSR_IE_EOC;
512 ch->ahb_seq = AHB_SEQ_BUS_WIDTH_32 | AHB_SEQ_INTR_ENB;
513 ch->apb_seq = APB_SEQ_BUS_WIDTH_32 | 1 << APB_SEQ_WRAP_SHIFT;
514}
515
516static void handle_oneshot_dma(struct tegra_dma_channel *ch)
517{
518 struct tegra_dma_req *req;
519
520 spin_lock(&ch->lock);
521 if (list_empty(&ch->list)) {
522 spin_unlock(&ch->lock);
523 return;
524 }
525
526 req = list_entry(ch->list.next, typeof(*req), node);
527 if (req) {
528 int bytes_transferred;
529
530 bytes_transferred =
531 (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
532 bytes_transferred += 1;
533 bytes_transferred <<= 2;
534
535 list_del(&req->node);
536 req->bytes_transferred = bytes_transferred;
537 req->status = TEGRA_DMA_REQ_SUCCESS;
538
539 spin_unlock(&ch->lock);
540 /* Callback should be called without any lock */
541 pr_debug("%s: transferred %d bytes\n", __func__,
542 req->bytes_transferred);
543 req->complete(req);
544 spin_lock(&ch->lock);
545 }
546
547 if (!list_empty(&ch->list)) {
548 req = list_entry(ch->list.next, typeof(*req), node);
549 /* the complete function we just called may have enqueued
550 another req, in which case dma has already started */
551 if (req->status != TEGRA_DMA_REQ_INFLIGHT)
552 tegra_dma_update_hw(ch, req);
553 }
554 spin_unlock(&ch->lock);
555}
556
557static void handle_continuous_dma(struct tegra_dma_channel *ch)
558{
559 struct tegra_dma_req *req;
560
561 spin_lock(&ch->lock);
562 if (list_empty(&ch->list)) {
563 spin_unlock(&ch->lock);
564 return;
565 }
566
567 req = list_entry(ch->list.next, typeof(*req), node);
568 if (req) {
569 if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
570 /* Load the next request into the hardware, if available
571 * */
572 if (!list_is_last(&req->node, &ch->list)) {
573 struct tegra_dma_req *next_req;
574
575 next_req = list_entry(req->node.next,
576 typeof(*next_req), node);
577 tegra_dma_update_hw_partial(ch, next_req);
578 }
579 req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
580 req->status = TEGRA_DMA_REQ_SUCCESS;
581 /* DMA lock is NOT held when callback is called */
582 spin_unlock(&ch->lock);
583 if (likely(req->threshold))
584 req->threshold(req);
585 return;
586
587 } else if (req->buffer_status ==
588 TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
589			/* Callback when the buffer is completely full (i.e. on
590			 * the second interrupt) */
591 int bytes_transferred;
592
593 bytes_transferred =
594 (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
595 bytes_transferred += 1;
596 bytes_transferred <<= 3;
597
598 req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
599 req->bytes_transferred = bytes_transferred;
600 req->status = TEGRA_DMA_REQ_SUCCESS;
601 list_del(&req->node);
602
603			/* DMA lock is NOT held when callback is called */
604 spin_unlock(&ch->lock);
605 req->complete(req);
606 return;
607
608 } else {
609 BUG();
610 }
611 }
612 spin_unlock(&ch->lock);
613}
614
615static irqreturn_t dma_isr(int irq, void *data)
616{
617 struct tegra_dma_channel *ch = data;
618 unsigned long status;
619
620 status = readl(ch->addr + APB_DMA_CHAN_STA);
621 if (status & STA_ISE_EOC)
622 writel(status, ch->addr + APB_DMA_CHAN_STA);
623 else {
624 pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
625 return IRQ_HANDLED;
626 }
627 return IRQ_WAKE_THREAD;
628}
629
630static irqreturn_t dma_thread_fn(int irq, void *data)
631{
632 struct tegra_dma_channel *ch = data;
633
634 if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
635 handle_oneshot_dma(ch);
636 else
637 handle_continuous_dma(ch);
638
639
640 return IRQ_HANDLED;
641}
642
643int __init tegra_dma_init(void)
644{
645 int ret = 0;
646 int i;
647 unsigned int irq;
648 void __iomem *addr;
649
650 addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
651 writel(GEN_ENABLE, addr + APB_DMA_GEN);
652 writel(0, addr + APB_DMA_CNTRL);
653 writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
654 addr + APB_DMA_IRQ_MASK_SET);
655
656 memset(channel_usage, 0, sizeof(channel_usage));
657 memset(dma_channels, 0, sizeof(dma_channels));
658
659 /* Reserve all the channels we are not supposed to touch */
660 for (i = 0; i < TEGRA_SYSTEM_DMA_CH_MIN; i++)
661 __set_bit(i, channel_usage);
662
663 for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
664 struct tegra_dma_channel *ch = &dma_channels[i];
665
666 __clear_bit(i, channel_usage);
667
668 ch->id = i;
669 snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);
670
671 ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
672 TEGRA_APB_DMA_CH0_SIZE * i);
673
674 spin_lock_init(&ch->lock);
675 INIT_LIST_HEAD(&ch->list);
676 tegra_dma_init_hw(ch);
677
678 irq = INT_APB_DMA_CH0 + i;
679 ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
680 dma_channels[i].name, ch);
681 if (ret) {
682 pr_err("Failed to register IRQ %d for DMA %d\n",
683 irq, i);
684 goto fail;
685 }
686 ch->irq = irq;
687 }
688 /* mark the shared channel allocated */
689 __set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);
690
691 for (i = TEGRA_SYSTEM_DMA_CH_MAX+1; i < NV_DMA_MAX_CHANNELS; i++)
692 __set_bit(i, channel_usage);
693
694 return ret;
695fail:
696 writel(0, addr + APB_DMA_GEN);
697 for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
698 struct tegra_dma_channel *ch = &dma_channels[i];
699 if (ch->irq)
700 free_irq(ch->irq, ch);
701 }
702 return ret;
703}
704
705#ifdef CONFIG_PM
706static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
707
708void tegra_dma_suspend(void)
709{
710 void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
711 u32 *ctx = apb_dma;
712 int i;
713
714 *ctx++ = readl(addr + APB_DMA_GEN);
715 *ctx++ = readl(addr + APB_DMA_CNTRL);
716 *ctx++ = readl(addr + APB_DMA_IRQ_MASK);
717
718 for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
719 addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
720 TEGRA_APB_DMA_CH0_SIZE * i);
721
722 *ctx++ = readl(addr + APB_DMA_CHAN_CSR);
723 *ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
724 *ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
725 *ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
726 *ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
727 }
728}
729
730void tegra_dma_resume(void)
731{
732 void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
733 u32 *ctx = apb_dma;
734 int i;
735
736 writel(*ctx++, addr + APB_DMA_GEN);
737 writel(*ctx++, addr + APB_DMA_CNTRL);
738 writel(*ctx++, addr + APB_DMA_IRQ_MASK);
739
740 for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
741 addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
742 TEGRA_APB_DMA_CH0_SIZE * i);
743
744 writel(*ctx++, addr + APB_DMA_CHAN_CSR);
745 writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
746 writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
747 writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
748 writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
749 }
750}
751
752#endif
diff --git a/arch/arm/mach-tegra/include/mach/dma.h b/arch/arm/mach-tegra/include/mach/dma.h
new file mode 100644
index 000000000000..39011bd9a925
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/dma.h
@@ -0,0 +1,155 @@
1/*
2 * arch/arm/mach-tegra/include/mach/dma.h
3 *
4 * Copyright (c) 2008-2009, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20
21#ifndef __MACH_TEGRA_DMA_H
22#define __MACH_TEGRA_DMA_H
23
24#include <linux/list.h>
25
26#if defined(CONFIG_TEGRA_SYSTEM_DMA)
27
28struct tegra_dma_req;
29struct tegra_dma_channel;
30
31#define TEGRA_DMA_REQ_SEL_CNTR 0
32#define TEGRA_DMA_REQ_SEL_I2S_2 1
33#define TEGRA_DMA_REQ_SEL_I2S_1 2
34#define TEGRA_DMA_REQ_SEL_SPD_I 3
35#define TEGRA_DMA_REQ_SEL_UI_I 4
36#define TEGRA_DMA_REQ_SEL_MIPI 5
37#define TEGRA_DMA_REQ_SEL_I2S2_2 6
38#define TEGRA_DMA_REQ_SEL_I2S2_1 7
39#define TEGRA_DMA_REQ_SEL_UARTA 8
40#define TEGRA_DMA_REQ_SEL_UARTB 9
41#define TEGRA_DMA_REQ_SEL_UARTC 10
42#define TEGRA_DMA_REQ_SEL_SPI 11
43#define TEGRA_DMA_REQ_SEL_AC97 12
44#define TEGRA_DMA_REQ_SEL_ACMODEM 13
45#define TEGRA_DMA_REQ_SEL_SL4B 14
46#define TEGRA_DMA_REQ_SEL_SL2B1 15
47#define TEGRA_DMA_REQ_SEL_SL2B2 16
48#define TEGRA_DMA_REQ_SEL_SL2B3 17
49#define TEGRA_DMA_REQ_SEL_SL2B4 18
50#define TEGRA_DMA_REQ_SEL_UARTD 19
51#define TEGRA_DMA_REQ_SEL_UARTE 20
52#define TEGRA_DMA_REQ_SEL_I2C 21
53#define TEGRA_DMA_REQ_SEL_I2C2 22
54#define TEGRA_DMA_REQ_SEL_I2C3 23
55#define TEGRA_DMA_REQ_SEL_DVC_I2C 24
56#define TEGRA_DMA_REQ_SEL_OWR 25
57#define TEGRA_DMA_REQ_SEL_INVALID 31
58
59enum tegra_dma_mode {
60 TEGRA_DMA_SHARED = 1,
61 TEGRA_DMA_MODE_CONTINOUS = 2,
62 TEGRA_DMA_MODE_ONESHOT = 4,
63};
64
65enum tegra_dma_req_error {
66 TEGRA_DMA_REQ_SUCCESS = 0,
67 TEGRA_DMA_REQ_ERROR_ABORTED,
68 TEGRA_DMA_REQ_INFLIGHT,
69};
70
71enum tegra_dma_req_buff_status {
72 TEGRA_DMA_REQ_BUF_STATUS_EMPTY = 0,
73 TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL,
74 TEGRA_DMA_REQ_BUF_STATUS_FULL,
75};
76
77struct tegra_dma_req {
78 struct list_head node;
79 unsigned int modid;
80 int instance;
81
82 /* Called when the req is complete and from the DMA ISR context.
83 * When this is called the req structure is no longer queued by
84 * the DMA channel.
85 *
86 * State of the DMA depends on the number of req it has. If there are
87	 * no DMA requests queued up, then it will STOP the DMA. If there are
88 * more requests in the DMA, then it will queue the next request.
89 */
90 void (*complete)(struct tegra_dma_req *req);
91
92	/* This is called from the DMA ISR context when the DMA is still in
93	 * progress and is actively filling the same buffer.
94 *
95	 * In case of continuous mode receive, this threshold is 1/2 the buffer
96 * size. In other cases, this will not even be called as there is no
97 * hardware support for it.
98 *
99	 * In the case of continuous mode receive, if there is a next req already
100 * queued, DMA programs the HW to use that req when this req is
101 * completed. If there is no "next req" queued, then DMA ISR doesn't do
102 * anything before calling this callback.
103 *
104	 * This is mainly used in cases where the client has queued
105	 * only one req and wants to get some sort of DMA threshold
106 * callback to program the next buffer.
107 *
108 */
109 void (*threshold)(struct tegra_dma_req *req);
110
111 /* 1 to copy to memory.
112 * 0 to copy from the memory to device FIFO */
113 int to_memory;
114
115 void *virt_addr;
116
117 unsigned long source_addr;
118 unsigned long dest_addr;
119 unsigned long dest_wrap;
120 unsigned long source_wrap;
121 unsigned long source_bus_width;
122 unsigned long dest_bus_width;
123 unsigned long req_sel;
124 unsigned int size;
125
126	/* Updated by the DMA driver on the completion of the request. */
127 int bytes_transferred;
128 int status;
129
130 /* DMA completion tracking information */
131 int buffer_status;
132
133 /* Client specific data */
134 void *dev;
135};
136
137int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
138 struct tegra_dma_req *req);
139int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
140 struct tegra_dma_req *req);
141void tegra_dma_dequeue(struct tegra_dma_channel *ch);
142void tegra_dma_flush(struct tegra_dma_channel *ch);
143
144bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
145 struct tegra_dma_req *req);
146bool tegra_dma_is_empty(struct tegra_dma_channel *ch);
147
148struct tegra_dma_channel *tegra_dma_allocate_channel(int mode);
149void tegra_dma_free_channel(struct tegra_dma_channel *ch);
150
151int __init tegra_dma_init(void);
152
153#endif
154
155#endif
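For reference, a hypothetical oneshot client of this header might look like the sketch below (illustration only, not part of this patch); the UART selector and physical addresses are placeholders, and a real driver would call tegra_dma_free_channel() once it is done with the channel:

static void tx_complete(struct tegra_dma_req *req)
{
	/* Transfer finished: req->status is TEGRA_DMA_REQ_SUCCESS and
	 * req->bytes_transferred holds the byte count. */
}

static int oneshot_tx(unsigned long buf_phys, unsigned long fifo_phys,
		      unsigned int len)
{
	static struct tegra_dma_req req;
	struct tegra_dma_channel *ch;

	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
	if (!ch)
		return -EBUSY;

	req.to_memory = 0;			/* memory -> peripheral FIFO */
	req.source_addr = buf_phys;		/* word-aligned source buffer */
	req.dest_addr = fifo_phys;		/* fixed FIFO register */
	req.source_wrap = 0;			/* linear reads from memory */
	req.dest_wrap = 4;			/* keep writing the same FIFO word */
	req.source_bus_width = 32;
	req.dest_bus_width = 32;
	req.req_sel = TEGRA_DMA_REQ_SEL_UARTA;	/* placeholder selector */
	req.size = len;				/* bytes, word multiple, at most 64 KB */
	req.complete = tx_complete;

	return tegra_dma_enqueue_req(ch, &req);
}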