author     Ryan Mallon <ryan@bluewatersys.com>          2009-03-02 15:27:38 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2009-03-03 07:20:11 -0500
commit     6d831c6554e4f95083919914955a1a3a4a6acfa9 (patch)
tree       839810e06475cda214b93340059c4c8881287cac /arch/arm/mach-ep93xx/dma-m2p.c
parent     1c8daabe1dafc30fcc1d929e620269ffe99b6f8a (diff)
[ARM] 5414/2: ep93xx m2p dma core
Add M2P DMA support to the ep93xx.

Signed-off-by: Ryan Mallon <ryan@bluewatersys.com>
Acked-by: Lennert Buytenhek <buytenh@wantstofly.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mach-ep93xx/dma-m2p.c')
-rw-r--r--  arch/arm/mach-ep93xx/dma-m2p.c  |  408
1 files changed, 408 insertions, 0 deletions
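
For orientation, below is a minimal sketch of how a peripheral driver might use the client API this patch exports (ep93xx_dma_m2p_client_register(), ep93xx_dma_m2p_submit(), ep93xx_dma_m2p_submit_recursive(), ep93xx_dma_m2p_flush(), ep93xx_dma_m2p_client_unregister()). The callback signatures and buffer fields are inferred from their use in dma-m2p.c; the EP93XX_DMA_M2P_PORT_I2S1 flag and the exact layout of struct ep93xx_dma_m2p_client are assumptions about <mach/dma.h> and are not part of this patch.

/* Hypothetical RX client; all "my_" names are illustrative only. */
#include <mach/dma.h>

static void my_buffer_started(void *cookie, struct ep93xx_dma_buffer *buf)
{
        /* The channel has started filling this buffer. */
}

static void my_buffer_finished(void *cookie, struct ep93xx_dma_buffer *buf,
                               int bytes, int error)
{
        /*
         * Called from m2p_irq() with the channel lock held.  Hand the data
         * to the upper layer here; to requeue the buffer from this context,
         * use ep93xx_dma_m2p_submit_recursive().
         */
}

static struct ep93xx_dma_m2p_client my_client = {
        .name            = "my-driver",
        .cookie          = NULL,
        .buffer_started  = my_buffer_started,
        .buffer_finished = my_buffer_finished,
        /* EP93XX_DMA_M2P_PORT_I2S1 is an assumed port flag from <mach/dma.h>. */
        .flags           = EP93XX_DMA_M2P_RX | EP93XX_DMA_M2P_PORT_I2S1,
};

static int my_setup(struct ep93xx_dma_buffer *buf)
{
        int err;

        err = ep93xx_dma_m2p_client_register(&my_client);
        if (err)
                return err;

        /* buf->bus_addr and buf->size must describe a DMA-able region. */
        ep93xx_dma_m2p_submit(&my_client, buf);
        return 0;
}

When the driver is done, ep93xx_dma_m2p_flush() discards any queued buffers and ep93xx_dma_m2p_client_unregister() releases the channel.
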
diff --git a/arch/arm/mach-ep93xx/dma-m2p.c b/arch/arm/mach-ep93xx/dma-m2p.c
new file mode 100644
index 000000000000..a2df5bb7dff0
--- /dev/null
+++ b/arch/arm/mach-ep93xx/dma-m2p.c
@@ -0,0 +1,408 @@
/*
 * arch/arm/mach-ep93xx/dma-m2p.c
 * M2P DMA handling for Cirrus EP93xx chips.
 *
 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 * Copyright (C) 2006 Applied Data Systems
 *
 * Copyright (C) 2009 Ryan Mallon <ryan@bluewatersys.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

/*
 * On the EP93xx chip the following peripherals may be allocated to the 10
 * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
 *
 *      I2S   contains 3 Tx and 3 Rx DMA Channels
 *      AAC   contains 3 Tx and 3 Rx DMA Channels
 *      UART1 contains 1 Tx and 1 Rx DMA Channel
 *      UART2 contains 1 Tx and 1 Rx DMA Channel
 *      UART3 contains 1 Tx and 1 Rx DMA Channel
 *      IrDA  contains 1 Tx and 1 Rx DMA Channel
 *
 * SSP and IDE use the Memory to Memory (M2M) channels and are not covered
 * by this implementation.
 */

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <mach/dma.h>
#include <mach/hardware.h>

#define M2P_CONTROL 0x00
#define M2P_CONTROL_STALL_IRQ_EN (1 << 0)
#define M2P_CONTROL_NFB_IRQ_EN (1 << 1)
#define M2P_CONTROL_ERROR_IRQ_EN (1 << 3)
#define M2P_CONTROL_ENABLE (1 << 4)
#define M2P_INTERRUPT 0x04
#define M2P_INTERRUPT_STALL (1 << 0)
#define M2P_INTERRUPT_NFB (1 << 1)
#define M2P_INTERRUPT_ERROR (1 << 3)
#define M2P_PPALLOC 0x08
#define M2P_STATUS 0x0c
#define M2P_REMAIN 0x14
#define M2P_MAXCNT0 0x20
#define M2P_BASE0 0x24
#define M2P_MAXCNT1 0x30
#define M2P_BASE1 0x34

#define STATE_IDLE 0	/* Channel is inactive. */
#define STATE_STALL 1	/* Channel is active, no buffers pending. */
#define STATE_ON 2	/* Channel is active, one buffer pending. */
#define STATE_NEXT 3	/* Channel is active, two buffers pending. */

struct m2p_channel {
        char *name;
        void __iomem *base;
        int irq;

        struct clk *clk;
        spinlock_t lock;

        void *client;
        unsigned next_slot:1;
        struct ep93xx_dma_buffer *buffer_xfer;
        struct ep93xx_dma_buffer *buffer_next;
        struct list_head buffers_pending;
};

static struct m2p_channel m2p_rx[] = {
        {"m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1},
        {"m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3},
        {"m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5},
        {"m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7},
        {"m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9},
        {NULL},
};

static struct m2p_channel m2p_tx[] = {
        {"m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0},
        {"m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2},
        {"m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4},
        {"m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6},
        {"m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8},
        {NULL},
};

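/*
 * Each M2P channel provides two hardware descriptor slots
 * (MAXCNT0/BASE0 and MAXCNT1/BASE1).  feed_buf() programs the next
 * free slot with the buffer's bus address and size and then flips
 * next_slot so that the two slots are used alternately.
 */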
static void feed_buf(struct m2p_channel *ch, struct ep93xx_dma_buffer *buf)
{
        if (ch->next_slot == 0) {
                writel(buf->size, ch->base + M2P_MAXCNT0);
                writel(buf->bus_addr, ch->base + M2P_BASE0);
        } else {
                writel(buf->size, ch->base + M2P_MAXCNT1);
                writel(buf->bus_addr, ch->base + M2P_BASE1);
        }
        ch->next_slot ^= 1;
}

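/*
 * choose_buffer_xfer()/choose_buffer_next() pull the next buffer off the
 * pending list (if any), feed it to the hardware and track it as the
 * buffer currently being transferred / the buffer queued up next.
 */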
static void choose_buffer_xfer(struct m2p_channel *ch)
{
        struct ep93xx_dma_buffer *buf;

        ch->buffer_xfer = NULL;
        if (!list_empty(&ch->buffers_pending)) {
                buf = list_entry(ch->buffers_pending.next,
                                 struct ep93xx_dma_buffer, list);
                list_del(&buf->list);
                feed_buf(ch, buf);
                ch->buffer_xfer = buf;
        }
}

static void choose_buffer_next(struct m2p_channel *ch)
{
        struct ep93xx_dma_buffer *buf;

        ch->buffer_next = NULL;
        if (!list_empty(&ch->buffers_pending)) {
                buf = list_entry(ch->buffers_pending.next,
                                 struct ep93xx_dma_buffer, list);
                list_del(&buf->list);
                feed_buf(ch, buf);
                ch->buffer_next = buf;
        }
}

static inline void m2p_set_control(struct m2p_channel *ch, u32 v)
{
        /*
         * The control register must be read immediately after being written so
         * that the internal state machine is correctly updated. See the ep93xx
         * users' guide for details.
         */
        writel(v, ch->base + M2P_CONTROL);
        readl(ch->base + M2P_CONTROL);
}

static inline int m2p_channel_state(struct m2p_channel *ch)
{
        return (readl(ch->base + M2P_STATUS) >> 4) & 0x3;
}

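/*
 * Interrupt handler.  The channel raises NFB ("next frame buffer") when it
 * is ready for another buffer and STALL when it has run out of buffers; a
 * latched ERROR is cleared and reported to the client via buffer_finished().
 * The handler completes finished buffers, refills the hardware slots from
 * the pending list and re-arms only the interrupts that are still relevant.
 */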
static irqreturn_t m2p_irq(int irq, void *dev_id)
{
        struct m2p_channel *ch = dev_id;
        struct ep93xx_dma_m2p_client *cl;
        u32 irq_status, v;
        int error = 0;

        cl = ch->client;

        spin_lock(&ch->lock);
        irq_status = readl(ch->base + M2P_INTERRUPT);

        if (irq_status & M2P_INTERRUPT_ERROR) {
                writel(M2P_INTERRUPT_ERROR, ch->base + M2P_INTERRUPT);
                error = 1;
        }

        if ((irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) == 0) {
                spin_unlock(&ch->lock);
                return IRQ_NONE;
        }

        switch (m2p_channel_state(ch)) {
        case STATE_IDLE:
                pr_crit("m2p_irq: dma interrupt without a dma buffer\n");
                BUG();
                break;

        case STATE_STALL:
                cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
                if (ch->buffer_next != NULL) {
                        cl->buffer_finished(cl->cookie, ch->buffer_next,
                                            0, error);
                }
                choose_buffer_xfer(ch);
                choose_buffer_next(ch);
                if (ch->buffer_xfer != NULL)
                        cl->buffer_started(cl->cookie, ch->buffer_xfer);
                break;

        case STATE_ON:
                cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
                ch->buffer_xfer = ch->buffer_next;
                choose_buffer_next(ch);
                cl->buffer_started(cl->cookie, ch->buffer_xfer);
                break;

        case STATE_NEXT:
                pr_crit("m2p_irq: dma interrupt while next\n");
                BUG();
                break;
        }

        v = readl(ch->base + M2P_CONTROL) & ~(M2P_CONTROL_STALL_IRQ_EN |
                                              M2P_CONTROL_NFB_IRQ_EN);
        if (ch->buffer_xfer != NULL)
                v |= M2P_CONTROL_STALL_IRQ_EN;
        if (ch->buffer_next != NULL)
                v |= M2P_CONTROL_NFB_IRQ_EN;
        m2p_set_control(ch, v);

        spin_unlock(&ch->lock);
        return IRQ_HANDLED;
}

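/*
 * Pick a channel from the RX or TX pool (depending on the client's flags).
 * A peripheral port may only be claimed once, so fail with -EBUSY if
 * another client already owns the requested port, and with -ENODEV if all
 * channels in the pool are taken.
 */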
static struct m2p_channel *find_free_channel(struct ep93xx_dma_m2p_client *cl)
{
        struct m2p_channel *ch;
        int i;

        if (cl->flags & EP93XX_DMA_M2P_RX)
                ch = m2p_rx;
        else
                ch = m2p_tx;

        for (i = 0; ch[i].base; i++) {
                struct ep93xx_dma_m2p_client *client;

                client = ch[i].client;
                if (client != NULL) {
                        int port;

                        port = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
                        if (port == (client->flags &
                                     EP93XX_DMA_M2P_PORT_MASK)) {
                                pr_warning("DMA channel already used by %s\n",
                                           cl->name ? : "unknown client");
                                return ERR_PTR(-EBUSY);
                        }
                }
        }

        for (i = 0; ch[i].base; i++) {
                if (ch[i].client == NULL)
                        return ch + i;
        }

        pr_warning("No free DMA channel for %s\n",
                   cl->name ? : "unknown client");
        return ERR_PTR(-ENODEV);
}

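/*
 * Ungate the channel clock, bind the channel to the client's peripheral
 * port via PPALLOC and enable it, together with the error interrupt bits
 * requested in the client's flags.
 */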
static void channel_enable(struct m2p_channel *ch)
{
        struct ep93xx_dma_m2p_client *cl = ch->client;
        u32 v;

        clk_enable(ch->clk);

        v = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
        writel(v, ch->base + M2P_PPALLOC);

        v = cl->flags & EP93XX_DMA_M2P_ERROR_MASK;
        v |= M2P_CONTROL_ENABLE | M2P_CONTROL_ERROR_IRQ_EN;
        m2p_set_control(ch, v);
}

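/*
 * Shut the channel down gracefully: mask the buffer interrupts, wait for
 * the current buffer to finish (leave STATE_ON), clear the control
 * register, wait for the channel to leave STATE_STALL and finally gate
 * its clock.
 */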
static void channel_disable(struct m2p_channel *ch)
{
        u32 v;

        v = readl(ch->base + M2P_CONTROL);
        v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN);
        m2p_set_control(ch, v);

        while (m2p_channel_state(ch) == STATE_ON)
                cpu_relax();

        m2p_set_control(ch, 0x0);

        while (m2p_channel_state(ch) == STATE_STALL)
                cpu_relax();

        clk_disable(ch->clk);
}

int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *cl)
{
        struct m2p_channel *ch;
        int err;

        ch = find_free_channel(cl);
        if (IS_ERR(ch))
                return PTR_ERR(ch);

        err = request_irq(ch->irq, m2p_irq, 0, cl->name ? : "dma-m2p", ch);
        if (err)
                return err;

        ch->client = cl;
        ch->next_slot = 0;
        ch->buffer_xfer = NULL;
        ch->buffer_next = NULL;
        INIT_LIST_HEAD(&ch->buffers_pending);

        cl->channel = ch;

        channel_enable(ch);

        return 0;
}
EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_register);

void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *cl)
{
        struct m2p_channel *ch = cl->channel;

        channel_disable(ch);
        free_irq(ch->irq, ch);
        ch->client = NULL;
}
EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_unregister);

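/*
 * Queue a buffer for transfer.  The first buffer goes straight into a
 * hardware slot and arms the STALL interrupt, the second primes the other
 * slot and arms the NFB interrupt, and any further buffers wait on the
 * pending list until the interrupt handler picks them up.
 */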
void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *cl,
                           struct ep93xx_dma_buffer *buf)
{
        struct m2p_channel *ch = cl->channel;
        unsigned long flags;
        u32 v;

        spin_lock_irqsave(&ch->lock, flags);
        v = readl(ch->base + M2P_CONTROL);
        if (ch->buffer_xfer == NULL) {
                ch->buffer_xfer = buf;
                feed_buf(ch, buf);
                cl->buffer_started(cl->cookie, buf);

                v |= M2P_CONTROL_STALL_IRQ_EN;
                m2p_set_control(ch, v);

        } else if (ch->buffer_next == NULL) {
                ch->buffer_next = buf;
                feed_buf(ch, buf);

                v |= M2P_CONTROL_NFB_IRQ_EN;
                m2p_set_control(ch, v);
        } else {
                list_add_tail(&buf->list, &ch->buffers_pending);
        }
        spin_unlock_irqrestore(&ch->lock, flags);
}
EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit);

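/*
 * Variant of submit intended for use from within the client's
 * buffer_finished() callback, which already runs with the channel lock
 * held: the buffer is simply appended to the pending list without taking
 * the lock again.
 */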
void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *cl,
                                     struct ep93xx_dma_buffer *buf)
{
        struct m2p_channel *ch = cl->channel;

        list_add_tail(&buf->list, &ch->buffers_pending);
}
EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit_recursive);

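/*
 * Drain and reset a client's channel: disable it, forget the in-flight and
 * pending buffers, then re-enable it in a clean state.  Note that the
 * discarded buffers are not reported back via buffer_finished().
 */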
void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *cl)
{
        struct m2p_channel *ch = cl->channel;

        channel_disable(ch);
        ch->next_slot = 0;
        ch->buffer_xfer = NULL;
        ch->buffer_next = NULL;
        INIT_LIST_HEAD(&ch->buffers_pending);
        channel_enable(ch);
}
EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_flush);

static int init_channel(struct m2p_channel *ch)
{
        ch->clk = clk_get(NULL, ch->name);
        if (IS_ERR(ch->clk))
                return PTR_ERR(ch->clk);

        spin_lock_init(&ch->lock);
        ch->client = NULL;

        return 0;
}

static int __init ep93xx_dma_m2p_init(void)
{
        int i;
        int ret;

        for (i = 0; m2p_rx[i].base; i++) {
                ret = init_channel(m2p_rx + i);
                if (ret)
                        return ret;
        }

        for (i = 0; m2p_tx[i].base; i++) {
                ret = init_channel(m2p_tx + i);
                if (ret)
                        return ret;
        }

        pr_info("M2P DMA subsystem initialized\n");
        return 0;
}
arch_initcall(ep93xx_dma_m2p_init);