author		Cyril Chemparathy <cyril@ti.com>		2010-09-15 10:11:28 -0400
committer	Kevin Hilman <khilman@deeprootsystems.com>	2010-09-24 10:40:31 -0400
commit		ef8c2dab01b6e30c4b2ca3ea3b8db33430493589 (patch)
tree		4556ea23735493311258114d770c5e17d55ca8f5 /drivers/net/davinci_cpdma.c
parent		7b3742aff1a9946b9b25f16d6a7ca22c10926391 (diff)
net: davinci_emac: separate out cpdma code

In addition to being embedded into the EMAC controller, the CPDMA hardware
block is used in TI's CPSW switch controller.  Fortunately, the programming
interface to this hardware block remains largely consistent across these
devices.  This patch adds a new CPDMA services layer, which can then be
reused by both the EMAC and CPSW drivers.

Signed-off-by: Cyril Chemparathy <cyril@ti.com>
Tested-by: Michael Williamson <michael.williamson@criticallink.com>
Tested-by: Caglar Akyuz <caglarakyuz@gmail.com>
Signed-off-by: Kevin Hilman <khilman@deeprootsystems.com>
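For orientation, a driver built on top of this service layer would be wired up
roughly as follows. This is a minimal sketch, not part of the patch: the
cpdma_params fields match those referenced by this file, the tx_chan_num()
helper comes from the accompanying davinci_cpdma.h, and the register offsets
plus the emac_tx_done()/emac_dma_setup() names are purely illustrative.

        /* completion handler: 'token' is whatever was handed to cpdma_chan_submit() */
        static void emac_tx_done(void *token, int len, int status)
        {
                dev_kfree_skb_any(token);
        }

        static int emac_dma_setup(struct platform_device *pdev, void __iomem *regs)
        {
                struct cpdma_params params = {
                        .dev              = &pdev->dev,
                        .dmaregs          = regs,               /* offsets are illustrative */
                        .txhdp            = regs + 0x600,
                        .rxhdp            = regs + 0x620,
                        .txcp             = regs + 0x640,
                        .rxcp             = regs + 0x660,
                        .rxfree           = regs + 0x680,
                        .num_chan         = 8,
                        .min_packet_size  = 60,
                        .desc_mem_phys    = 0,  /* 0: use dma_alloc_coherent memory */
                        .desc_mem_size    = SZ_8K,
                        .desc_align       = 16,
                        .has_soft_reset   = true,
                };
                struct cpdma_ctlr *dma;
                struct cpdma_chan *txch;

                dma = cpdma_ctlr_create(&params);
                if (!dma)
                        return -ENOMEM;

                txch = cpdma_chan_create(dma, tx_chan_num(0), emac_tx_done);
                if (IS_ERR_OR_NULL(txch)) {
                        cpdma_ctlr_destroy(dma);
                        return -ENOMEM;
                }

                /* the dma/txch handles would be stashed in driver private data */
                return cpdma_ctlr_start(dma);
        }

        /*
         * Transmit path (e.g. ndo_start_xmit), with emac_tx_done() invoked
         * once the hardware releases the buffer:
         *
         *      cpdma_chan_submit(txch, skb, skb->data, skb->len, GFP_KERNEL);
         *
         * Interrupt/NAPI path, reaping up to 'budget' completed descriptors
         * and acknowledging the interrupt:
         *
         *      num_done = cpdma_chan_process(txch, budget);
         *      cpdma_ctlr_eoi(dma);
         */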
Diffstat (limited to 'drivers/net/davinci_cpdma.c')
-rw-r--r--	drivers/net/davinci_cpdma.c	965
1 file changed, 965 insertions(+), 0 deletions(-)
diff --git a/drivers/net/davinci_cpdma.c b/drivers/net/davinci_cpdma.c
new file mode 100644
index 000000000000..e92b2b6cd8c4
--- /dev/null
+++ b/drivers/net/davinci_cpdma.c
@@ -0,0 +1,965 @@
/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

struct cpdma_desc {
        /* hardware fields */
        u32 hw_next;
        u32 hw_buffer;
        u32 hw_len;
        u32 hw_mode;
        /* software fields */
        void *sw_token;
        u32 sw_buffer;
        u32 sw_len;
};

struct cpdma_desc_pool {
        u32 phys;
        void __iomem *iomap;            /* ioremap map */
        void *cpumap;                   /* dma_alloc map */
        int desc_size, mem_size;
        int num_desc, used_desc;
        unsigned long *bitmap;
        struct device *dev;
        spinlock_t lock;
};

enum cpdma_state {
        CPDMA_STATE_IDLE,
        CPDMA_STATE_ACTIVE,
        CPDMA_STATE_TEARDOWN,
};

static const char *cpdma_state_str[] = { "idle", "active", "teardown" };

struct cpdma_ctlr {
        enum cpdma_state state;
        struct cpdma_params params;
        struct device *dev;
        struct cpdma_desc_pool *pool;
        spinlock_t lock;
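        /*
         * tx channels live in the first half of this array, rx channels
         * in the second half (see the channel numbering helpers in
         * davinci_cpdma.h)
         */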
        struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
};

struct cpdma_chan {
        enum cpdma_state state;
        struct cpdma_ctlr *ctlr;
        int chan_num;
        spinlock_t lock;
        struct cpdma_desc __iomem *head, *tail;
        int count;
        void __iomem *hdp, *cp, *rxfree;
        u32 mask;
        cpdma_handler_fn handler;
        enum dma_data_direction dir;
        struct cpdma_chan_stats stats;
        /* offsets into dmaregs */
        int int_set, int_clear, td;
};

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align)
{
        int bitmap_size;
        struct cpdma_desc_pool *pool;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return NULL;

        spin_lock_init(&pool->lock);

        pool->dev = dev;
        pool->mem_size = size;
        pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
        pool->num_desc = size / pool->desc_size;
        bitmap_size = BITS_TO_LONGS(pool->num_desc) * sizeof(long);
        pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!pool->bitmap)
                goto fail;

        if (phys) {
                pool->phys = phys;
                pool->iomap = ioremap(phys, size);
        } else {
                pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
                                                  GFP_KERNEL);
                pool->iomap = (void __force __iomem *)pool->cpumap;
        }

        if (pool->iomap)
                return pool;

fail:
        kfree(pool->bitmap);
        kfree(pool);
        return NULL;
}

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
        unsigned long flags;

        if (!pool)
                return;

        spin_lock_irqsave(&pool->lock, flags);
        WARN_ON(pool->used_desc);
        kfree(pool->bitmap);
        if (pool->cpumap) {
                dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
                                  pool->phys);
        } else {
                iounmap(pool->iomap);
        }
        spin_unlock_irqrestore(&pool->lock, flags);
        kfree(pool);
}

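/*
 * Descriptors may live in device iomem rather than regular memory, so they
 * are only touched through __iomem pointers.  These helpers convert between
 * such pointers and the bus addresses that the hardware expects to find in
 * hw_next and in the HDP/CP registers.
 */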
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
                                   struct cpdma_desc __iomem *desc)
{
        if (!desc)
                return 0;
        return pool->phys + (__force dma_addr_t)desc -
                (__force dma_addr_t)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
        return dma ? pool->iomap + dma - pool->phys : NULL;
}

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
{
        unsigned long flags;
        int index;
        struct cpdma_desc __iomem *desc = NULL;

        spin_lock_irqsave(&pool->lock, flags);

        index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
                                           num_desc, 0);
        if (index < pool->num_desc) {
                bitmap_set(pool->bitmap, index, num_desc);
                desc = pool->iomap + pool->desc_size * index;
                pool->used_desc++;
        }

        spin_unlock_irqrestore(&pool->lock, flags);
        return desc;
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
                            struct cpdma_desc __iomem *desc, int num_desc)
{
        unsigned long flags, index;

        index = ((unsigned long)desc - (unsigned long)pool->iomap) /
                pool->desc_size;
        spin_lock_irqsave(&pool->lock, flags);
        bitmap_clear(pool->bitmap, index, num_desc);
        pool->used_desc--;
        spin_unlock_irqrestore(&pool->lock, flags);
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
        struct cpdma_ctlr *ctlr;

        ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
        if (!ctlr)
                return NULL;

        ctlr->state = CPDMA_STATE_IDLE;
        ctlr->params = *params;
        ctlr->dev = params->dev;
        spin_lock_init(&ctlr->lock);

        ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
                                            ctlr->params.desc_mem_phys,
                                            ctlr->params.desc_mem_size,
                                            ctlr->params.desc_align);
        if (!ctlr->pool) {
                kfree(ctlr);
                return NULL;
        }

        if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
                ctlr->num_chan = CPDMA_MAX_CHANNELS;
        return ctlr;
}

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EBUSY;
        }

        if (ctlr->params.has_soft_reset) {
                unsigned long timeout = jiffies + HZ/10;

                dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
                while (time_before(jiffies, timeout)) {
                        if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
                                break;
                }
                WARN_ON(!time_before(jiffies, timeout));
        }

        for (i = 0; i < ctlr->num_chan; i++) {
                __raw_writel(0, ctlr->params.txhdp + 4 * i);
                __raw_writel(0, ctlr->params.rxhdp + 4 * i);
                __raw_writel(0, ctlr->params.txcp + 4 * i);
                __raw_writel(0, ctlr->params.rxcp + 4 * i);
        }

        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

        ctlr->state = CPDMA_STATE_ACTIVE;

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_start(ctlr->channels[i]);
        }
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        ctlr->state = CPDMA_STATE_TEARDOWN;

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_stop(ctlr->channels[i]);
        }

        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

        ctlr->state = CPDMA_STATE_IDLE;

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}

int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
{
        struct device *dev = ctlr->dev;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);

        dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);

        dev_info(dev, "CPDMA: txidver: %x",
                 dma_reg_read(ctlr, CPDMA_TXIDVER));
        dev_info(dev, "CPDMA: txcontrol: %x",
                 dma_reg_read(ctlr, CPDMA_TXCONTROL));
        dev_info(dev, "CPDMA: txteardown: %x",
                 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
        dev_info(dev, "CPDMA: rxidver: %x",
                 dma_reg_read(ctlr, CPDMA_RXIDVER));
        dev_info(dev, "CPDMA: rxcontrol: %x",
                 dma_reg_read(ctlr, CPDMA_RXCONTROL));
        dev_info(dev, "CPDMA: softreset: %x",
                 dma_reg_read(ctlr, CPDMA_SOFTRESET));
        dev_info(dev, "CPDMA: rxteardown: %x",
                 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
        dev_info(dev, "CPDMA: txintstatraw: %x",
                 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
        dev_info(dev, "CPDMA: txintstatmasked: %x",
                 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
        dev_info(dev, "CPDMA: txintmaskset: %x",
                 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
        dev_info(dev, "CPDMA: txintmaskclear: %x",
                 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
        dev_info(dev, "CPDMA: macinvector: %x",
                 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
        dev_info(dev, "CPDMA: maceoivector: %x",
                 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
        dev_info(dev, "CPDMA: rxintstatraw: %x",
                 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
        dev_info(dev, "CPDMA: rxintstatmasked: %x",
                 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
        dev_info(dev, "CPDMA: rxintmaskset: %x",
                 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
        dev_info(dev, "CPDMA: rxintmaskclear: %x",
                 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
        dev_info(dev, "CPDMA: dmaintstatraw: %x",
                 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
        dev_info(dev, "CPDMA: dmaintstatmasked: %x",
                 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
        dev_info(dev, "CPDMA: dmaintmaskset: %x",
                 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
        dev_info(dev, "CPDMA: dmaintmaskclear: %x",
                 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));

        if (ctlr->params.has_ext_regs) {
                dev_info(dev, "CPDMA: dmacontrol: %x",
                         dma_reg_read(ctlr, CPDMA_DMACONTROL));
                dev_info(dev, "CPDMA: dmastatus: %x",
                         dma_reg_read(ctlr, CPDMA_DMASTATUS));
                dev_info(dev, "CPDMA: rxbuffofs: %x",
                         dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
        }

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
                if (ctlr->channels[i])
                        cpdma_chan_dump(ctlr->channels[i]);

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
        int ret = 0, i;

        if (!ctlr)
                return -EINVAL;

        /*
         * don't hold ctlr->lock here: cpdma_ctlr_stop() and
         * cpdma_chan_destroy() each take it themselves
         */
        if (ctlr->state != CPDMA_STATE_IDLE)
                cpdma_ctlr_stop(ctlr);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_destroy(ctlr->channels[i]);
        }

        cpdma_desc_pool_destroy(ctlr->pool);
        kfree(ctlr);
        return ret;
}

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
        unsigned long flags;
        int i, reg;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
        dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_int_ctrl(ctlr->channels[i], enable);
        }

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}

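/* write the end-of-interrupt vector to acknowledge the interrupt */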
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
{
        dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
}

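/*
 * chan_num encodes both direction and channel index; the __chan_linear()
 * and is_rx_chan() helpers in davinci_cpdma.h unpack it, and the channel
 * binds to the rx or tx register block accordingly.
 */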
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
                                     cpdma_handler_fn handler)
{
        struct cpdma_chan *chan;
        int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
        unsigned long flags;

        if (__chan_linear(chan_num) >= ctlr->num_chan)
                return NULL;

        ret = -ENOMEM;
        chan = kzalloc(sizeof(*chan), GFP_KERNEL);
        if (!chan)
                goto err_chan_alloc;

        spin_lock_irqsave(&ctlr->lock, flags);
        ret = -EBUSY;
        if (ctlr->channels[chan_num])
                goto err_chan_busy;

        chan->ctlr = ctlr;
        chan->state = CPDMA_STATE_IDLE;
        chan->chan_num = chan_num;
        chan->handler = handler;

        if (is_rx_chan(chan)) {
                chan->hdp = ctlr->params.rxhdp + offset;
                chan->cp = ctlr->params.rxcp + offset;
                chan->rxfree = ctlr->params.rxfree + offset;
                chan->int_set = CPDMA_RXINTMASKSET;
                chan->int_clear = CPDMA_RXINTMASKCLEAR;
                chan->td = CPDMA_RXTEARDOWN;
                chan->dir = DMA_FROM_DEVICE;
        } else {
                chan->hdp = ctlr->params.txhdp + offset;
                chan->cp = ctlr->params.txcp + offset;
                chan->int_set = CPDMA_TXINTMASKSET;
                chan->int_clear = CPDMA_TXINTMASKCLEAR;
                chan->td = CPDMA_TXTEARDOWN;
                chan->dir = DMA_TO_DEVICE;
        }
        chan->mask = BIT(chan_linear(chan));

        spin_lock_init(&chan->lock);

        ctlr->channels[chan_num] = chan;
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return chan;

err_chan_busy:
        spin_unlock_irqrestore(&ctlr->lock, flags);
        kfree(chan);
err_chan_alloc:
        return ERR_PTR(ret);
}

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr;
        unsigned long flags;

        if (!chan)
                return -EINVAL;
        ctlr = chan->ctlr;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE)
                cpdma_chan_stop(chan);
        ctlr->channels[chan->chan_num] = NULL;
        spin_unlock_irqrestore(&ctlr->lock, flags);
        kfree(chan);
        return 0;
}

int cpdma_chan_get_stats(struct cpdma_chan *chan,
                         struct cpdma_chan_stats *stats)
{
        unsigned long flags;

        if (!chan)
                return -EINVAL;
        spin_lock_irqsave(&chan->lock, flags);
        memcpy(stats, &chan->stats, sizeof(*stats));
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}

int cpdma_chan_dump(struct cpdma_chan *chan)
{
        unsigned long flags;
        struct device *dev = chan->ctlr->dev;

        spin_lock_irqsave(&chan->lock, flags);

        dev_info(dev, "channel %d (%s %d) state %s",
                 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
                 chan_linear(chan), cpdma_state_str[chan->state]);
        dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
        dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
        if (chan->rxfree) {
                dev_info(dev, "\trxfree: %x\n",
                         chan_read(chan, rxfree));
        }

        dev_info(dev, "\tstats head_enqueue: %d\n",
                 chan->stats.head_enqueue);
        dev_info(dev, "\tstats tail_enqueue: %d\n",
                 chan->stats.tail_enqueue);
        dev_info(dev, "\tstats pad_enqueue: %d\n",
                 chan->stats.pad_enqueue);
        dev_info(dev, "\tstats misqueued: %d\n",
                 chan->stats.misqueued);
        dev_info(dev, "\tstats desc_alloc_fail: %d\n",
                 chan->stats.desc_alloc_fail);
        dev_info(dev, "\tstats pad_alloc_fail: %d\n",
                 chan->stats.pad_alloc_fail);
        dev_info(dev, "\tstats runt_receive_buff: %d\n",
                 chan->stats.runt_receive_buff);
        dev_info(dev, "\tstats runt_transmit_buff: %d\n",
                 chan->stats.runt_transmit_buff);
        dev_info(dev, "\tstats empty_dequeue: %d\n",
                 chan->stats.empty_dequeue);
        dev_info(dev, "\tstats busy_dequeue: %d\n",
                 chan->stats.busy_dequeue);
        dev_info(dev, "\tstats good_dequeue: %d\n",
                 chan->stats.good_dequeue);
        dev_info(dev, "\tstats requeue: %d\n",
                 chan->stats.requeue);
        dev_info(dev, "\tstats teardown_dequeue: %d\n",
                 chan->stats.teardown_dequeue);

        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}

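/*
 * Chain a descriptor onto a (possibly running) channel.  If the channel is
 * idle, simply write the descriptor's address to the head pointer (HDP).
 * Otherwise link it after the current tail; if the hardware has meanwhile
 * raised EOQ on the old tail (i.e. it read hw_next as zero and stopped),
 * restart the queue by writing HDP ourselves and count it as a misqueue.
 */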
static void __cpdma_chan_submit(struct cpdma_chan *chan,
                                struct cpdma_desc __iomem *desc)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc __iomem *prev = chan->tail;
        struct cpdma_desc_pool *pool = ctlr->pool;
        dma_addr_t desc_dma;
        u32 mode;

        desc_dma = desc_phys(pool, desc);

        /* simple case - idle channel */
        if (!chan->head) {
                chan->stats.head_enqueue++;
                chan->head = desc;
                chan->tail = desc;
                if (chan->state == CPDMA_STATE_ACTIVE)
                        chan_write(chan, hdp, desc_dma);
                return;
        }

        /* first chain the descriptor at the tail of the list */
        desc_write(prev, hw_next, desc_dma);
        chan->tail = desc;
        chan->stats.tail_enqueue++;

        /* next check if EOQ has been triggered already */
        mode = desc_read(prev, hw_mode);
        if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
            (chan->state == CPDMA_STATE_ACTIVE)) {
                desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
                chan_write(chan, hdp, desc_dma);
                chan->stats.misqueued++;
        }
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
                      int len, gfp_t gfp_mask)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc __iomem *desc;
        dma_addr_t buffer;
        unsigned long flags;
        u32 mode;
        int ret = 0;

        spin_lock_irqsave(&chan->lock, flags);

        if (chan->state == CPDMA_STATE_TEARDOWN) {
                ret = -EINVAL;
                goto unlock_ret;
        }

        desc = cpdma_desc_alloc(ctlr->pool, 1);
        if (!desc) {
                chan->stats.desc_alloc_fail++;
                ret = -ENOMEM;
                goto unlock_ret;
        }

        if (len < ctlr->params.min_packet_size) {
                len = ctlr->params.min_packet_size;
                chan->stats.runt_transmit_buff++;
        }

        buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
        mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

        desc_write(desc, hw_next,   0);
        desc_write(desc, hw_buffer, buffer);
        desc_write(desc, hw_len,    len);
        desc_write(desc, hw_mode,   mode | len);
        desc_write(desc, sw_token,  token);
        desc_write(desc, sw_buffer, buffer);
        desc_write(desc, sw_len,    len);

        __cpdma_chan_submit(chan, desc);

        if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
                chan_write(chan, rxfree, 1);

        chan->count++;

unlock_ret:
        spin_unlock_irqrestore(&chan->lock, flags);
        return ret;
}

static void __cpdma_chan_free(struct cpdma_chan *chan,
                              struct cpdma_desc __iomem *desc,
                              int outlen, int status)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc_pool *pool = ctlr->pool;
        dma_addr_t buff_dma;
        int origlen;
        void *token;

        token = (void *)desc_read(desc, sw_token);
        buff_dma = desc_read(desc, sw_buffer);
        origlen = desc_read(desc, sw_len);

        dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
        cpdma_desc_free(pool, desc, 1);
        (*chan->handler)(token, outlen, status);
}

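/*
 * Reap exactly one completed descriptor from the head of the queue: ack it
 * to the hardware via the completion pointer (CP), restart the queue via
 * HDP if the hardware stopped on EOQ, then release the descriptor and run
 * the channel's completion handler outside the channel lock.
 */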
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc __iomem *desc;
        int status, outlen;
        struct cpdma_desc_pool *pool = ctlr->pool;
        dma_addr_t desc_dma;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        desc = chan->head;
        if (!desc) {
                chan->stats.empty_dequeue++;
                status = -ENOENT;
                goto unlock_ret;
        }
        desc_dma = desc_phys(pool, desc);

        status = desc_read(desc, hw_mode);
        outlen = status & 0x7ff;
        if (status & CPDMA_DESC_OWNER) {
                chan->stats.busy_dequeue++;
                status = -EBUSY;
                goto unlock_ret;
        }
        status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);

        chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
        chan_write(chan, cp, desc_dma);
        chan->count--;
        chan->stats.good_dequeue++;

        if (status & CPDMA_DESC_EOQ) {
                chan->stats.requeue++;
                chan_write(chan, hdp, desc_phys(pool, chan->head));
        }

        spin_unlock_irqrestore(&chan->lock, flags);

        __cpdma_chan_free(chan, desc, outlen, status);
        return status;

unlock_ret:
        spin_unlock_irqrestore(&chan->lock, flags);
        return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
        int used = 0, ret = 0;

        if (chan->state != CPDMA_STATE_ACTIVE)
                return -EINVAL;

        while (used < quota) {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
                used++;
        }
        return used;
}

int cpdma_chan_start(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc_pool *pool = ctlr->pool;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EBUSY;
        }
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }
        dma_reg_write(ctlr, chan->int_set, chan->mask);
        chan->state = CPDMA_STATE_ACTIVE;
        if (chan->head) {
                chan_write(chan, hdp, desc_phys(pool, chan->head));
                if (chan->rxfree)
                        chan_write(chan, rxfree, chan->count);
        }

        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}

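/*
 * Channel teardown: mask the channel interrupt, ask the hardware to tear
 * the queue down, poll CP until it reports the teardown cookie, then
 * complete whatever the hardware finished and hand any remaining
 * descriptors back to their owners with an error status.
 */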
int cpdma_chan_stop(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc_pool *pool = ctlr->pool;
        unsigned long flags;
        int ret;
        unsigned long timeout;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        chan->state = CPDMA_STATE_TEARDOWN;
        dma_reg_write(ctlr, chan->int_clear, chan->mask);

        /* trigger teardown */
        dma_reg_write(ctlr, chan->td, chan->chan_num);

        /* wait for teardown complete */
        timeout = jiffies + HZ/10;      /* 100 msec */
        while (time_before(jiffies, timeout)) {
                u32 cp = chan_read(chan, cp);
                if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
                        break;
                cpu_relax();
        }
        WARN_ON(!time_before(jiffies, timeout));
        chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

        /* handle completed packets */
        do {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
        } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);

        /* remaining packets haven't been tx/rx'ed, clean them up */
        while (chan->head) {
                struct cpdma_desc __iomem *desc = chan->head;
                dma_addr_t next_dma;

                next_dma = desc_read(desc, hw_next);
                chan->head = desc_from_phys(pool, next_dma);
                chan->stats.teardown_dequeue++;

                /* issue callback without locks held */
                spin_unlock_irqrestore(&chan->lock, flags);
                __cpdma_chan_free(chan, desc, 0, -ENOSYS);
                spin_lock_irqsave(&chan->lock, flags);
        }

        chan->state = CPDMA_STATE_IDLE;
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
                      chan->mask);
        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}

struct cpdma_control_info {
        u32 reg;
        u32 shift, mask;
        int access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

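/*
 * Table of control knobs, each described as a bitfield (reg/shift/mask) in
 * the extended DMACONTROL/DMASTATUS/RXBUFFOFS registers, plus the access
 * rights enforced by cpdma_control_get()/cpdma_control_set().
 */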
static struct cpdma_control_info controls[] = {
        [CPDMA_CMD_IDLE]          = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
        [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
        [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
        [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
        [CPDMA_TX_PRIO_FIXED]     = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
        [CPDMA_STAT_IDLE]         = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
        [CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
        [CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
        [CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
        [CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
        [CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
        unsigned long flags;
        struct cpdma_control_info *info = &controls[control];
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);

        ret = -ENOTSUPP;
        if (!ctlr->params.has_ext_regs)
                goto unlock_ret;

        ret = -EINVAL;
        if (ctlr->state != CPDMA_STATE_ACTIVE)
                goto unlock_ret;

        ret = -ENOENT;
        if (control < 0 || control >= ARRAY_SIZE(controls))
                goto unlock_ret;

        ret = -EPERM;
        if ((info->access & ACCESS_RO) != ACCESS_RO)
                goto unlock_ret;

        ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
        unsigned long flags;
        struct cpdma_control_info *info = &controls[control];
        int ret;
        u32 val;

        spin_lock_irqsave(&ctlr->lock, flags);

        ret = -ENOTSUPP;
        if (!ctlr->params.has_ext_regs)
                goto unlock_ret;

        ret = -EINVAL;
        if (ctlr->state != CPDMA_STATE_ACTIVE)
                goto unlock_ret;

        ret = -ENOENT;
        if (control < 0 || control >= ARRAY_SIZE(controls))
                goto unlock_ret;

        ret = -EPERM;
        if ((info->access & ACCESS_WO) != ACCESS_WO)
                goto unlock_ret;

        val = dma_reg_read(ctlr, info->reg);
        val &= ~(info->mask << info->shift);
        val |= (value & info->mask) << info->shift;
        dma_reg_write(ctlr, info->reg, val);
        ret = 0;

unlock_ret:
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}