author		Santosh Shilimkar <santosh.shilimkar@ti.com>	2014-03-30 17:29:04 -0400
committer	Santosh Shilimkar <santosh.shilimkar@ti.com>	2014-09-24 09:49:15 -0400
commit		88139ed030583557751e279968e13e892ae10825 (patch)
tree		0677ae76fa8daca0599669a748dabb3fc75c0332 /drivers/soc
parent		8172296d8717be1951da4bb4feb2700a60e8cdde (diff)
soc: ti: add Keystone Navigator DMA support
The Keystone Navigator DMA driver sets up the DMA channels and flows for the
QMSS (Queue Manager SubSystem), which triggers the actual data movements
across clients using destination queues. Client modules such as NETCP
(Network Coprocessor), SRIO (Serial RapidIO) and the CRYPTO engines each have
their own instance of packet DMA hardware. QMSS also has an internal packet
DMA module which is used as an infrastructure DMA with zero copy.

Initially this driver was proposed as a DMA engine driver, but since the
hardware is not a typical DMA engine and hence doesn't fit the DMA engine
driver model, that approach was nacked. Link to that discussion:

    https://lkml.org/lkml/2014/3/18/340

As agreed there, the Navigator DMA is now paired with its companion Navigator
QMSS subsystem driver.

Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Kumar Gala <galak@codeaurora.org>
Cc: Olof Johansson <olof@lixom.net>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Grant Likely <grant.likely@linaro.org>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Sandeep Nair <sandeep_n@ti.com>
Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
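As a rough usage sketch (not part of this patch): a client driver whose device
tree node carries matching "ti,navigator-dmas" / "ti,navigator-dma-names"
properties could open its channels roughly as below. The names "nettx0" /
"netrx0", the queue ids and the local variables are placeholders; only the
knav_dma_cfg fields actually referenced by knav_dma.c are shown.

	#include <linux/err.h>
	#include <linux/soc/ti/knav_dma.h>

	struct knav_dma_cfg tx_cfg = { }, rx_cfg = { };
	void *tx_chan, *rx_chan;

	/* Transmit channel: packet filter options and scheduling priority */
	tx_cfg.direction = DMA_MEM_TO_DEV;
	tx_cfg.u.tx.filt_einfo = false;
	tx_cfg.u.tx.filt_pswords = false;
	tx_cfg.u.tx.priority = 0;
	tx_chan = knav_dma_open_channel(dev, "nettx0", &tx_cfg);
	if (IS_ERR_OR_NULL(tx_chan))
		return -ENODEV;

	/* Receive flow: destination queue plus one free descriptor queue */
	rx_cfg.direction = DMA_DEV_TO_MEM;
	rx_cfg.u.rx.dst_q = rx_compl_qid;	/* placeholder queue id */
	rx_cfg.u.rx.fdq[0] = rx_fdq_qid;	/* placeholder queue id */
	rx_chan = knav_dma_open_channel(dev, "netrx0", &rx_cfg);
	if (IS_ERR_OR_NULL(rx_chan)) {
		knav_dma_close_channel(tx_chan);
		return -ENODEV;
	}

Note that knav_dma_open_channel() returns an error-encoded pointer rather than
NULL on failure, hence the IS_ERR_OR_NULL() checks above.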
Diffstat (limited to 'drivers/soc')
-rw-r--r--	drivers/soc/ti/Kconfig		 10
-rw-r--r--	drivers/soc/ti/Makefile		  1
-rw-r--r--	drivers/soc/ti/knav_dma.c	815
3 files changed, 826 insertions, 0 deletions
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index f73896f762e8..7266b2165183 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -18,4 +18,14 @@ config KEYSTONE_NAVIGATOR_QMSS
18
19	  If unsure, say N.
20
21config KEYSTONE_NAVIGATOR_DMA
22	tristate "TI Keystone Navigator Packet DMA support"
23	depends on ARCH_KEYSTONE
24	help
25	  Say y to enable support for the Keystone Navigator Packet DMA
26	  on the Keystone family of devices. It sets up the DMA channels
27	  for the Queue Manager SubSystem.
28
29	  If unsure, say N.
30
31endif # SOC_TI
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
index bf85cacd5b85..6bed611e1934 100644
--- a/drivers/soc/ti/Makefile
+++ b/drivers/soc/ti/Makefile
@@ -2,3 +2,4 @@
2# TI Keystone SOC drivers
3#
4obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS) += knav_qmss_queue.o knav_qmss_acc.o
5obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
new file mode 100644
index 000000000000..17264275f32b
--- /dev/null
+++ b/drivers/soc/ti/knav_dma.c
@@ -0,0 +1,815 @@
1/*
2 * Copyright (C) 2014 Texas Instruments Incorporated
3 * Authors: Santosh Shilimkar <santosh.shilimkar@ti.com>
4 * Sandeep Nair <sandeep_n@ti.com>
5 * Cyril Chemparathy <cyril@ti.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation version 2.
10 *
11 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
12 * kind, whether express or implied; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/io.h>
18#include <linux/sched.h>
19#include <linux/module.h>
20#include <linux/dma-direction.h>
21#include <linux/interrupt.h>
22#include <linux/pm_runtime.h>
23#include <linux/of_dma.h>
24#include <linux/of_address.h>
25#include <linux/platform_device.h>
26#include <linux/soc/ti/knav_dma.h>
27#include <linux/debugfs.h>
28#include <linux/seq_file.h>
29
30#define REG_MASK 0xffffffff
31
32#define DMA_LOOPBACK BIT(31)
33#define DMA_ENABLE BIT(31)
34#define DMA_TEARDOWN BIT(30)
35
36#define DMA_TX_FILT_PSWORDS BIT(29)
37#define DMA_TX_FILT_EINFO BIT(30)
38#define DMA_TX_PRIO_SHIFT 0
39#define DMA_RX_PRIO_SHIFT 16
40#define DMA_PRIO_MASK GENMASK(3, 0)
41#define DMA_PRIO_DEFAULT 0
42#define DMA_RX_TIMEOUT_DEFAULT 17500 /* cycles */
43#define DMA_RX_TIMEOUT_MASK GENMASK(16, 0)
44#define DMA_RX_TIMEOUT_SHIFT 0
45
46#define CHAN_HAS_EPIB BIT(30)
47#define CHAN_HAS_PSINFO BIT(29)
48#define CHAN_ERR_RETRY BIT(28)
49#define CHAN_PSINFO_AT_SOP BIT(25)
50#define CHAN_SOP_OFF_SHIFT 16
51#define CHAN_SOP_OFF_MASK GENMASK(9, 0)
52#define DESC_TYPE_SHIFT 26
53#define DESC_TYPE_MASK GENMASK(2, 0)
54
55/*
56 * QMGR & QNUM together make up 14 bits, with QMGR as the 2 MSBs, in the
57 * logical navigator cloud mapping scheme. Using the 14-bit physical
58 * queue numbers directly maps into this scheme.
59 */
60#define CHAN_QNUM_MASK GENMASK(14, 0)
61#define DMA_MAX_QMS 4
62#define DMA_TIMEOUT 1 /* msecs */
63#define DMA_INVALID_ID 0xffff
64
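/*
 * MMIO register layouts of the packet DMA: global configuration, per
 * channel control, tx scheduler priority and rx flow configuration.
 */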
65struct reg_global {
66 u32 revision;
67 u32 perf_control;
68 u32 emulation_control;
69 u32 priority_control;
70 u32 qm_base_address[DMA_MAX_QMS];
71};
72
73struct reg_chan {
74 u32 control;
75 u32 mode;
76 u32 __rsvd[6];
77};
78
79struct reg_tx_sched {
80 u32 prio;
81};
82
83struct reg_rx_flow {
84 u32 control;
85 u32 tags;
86 u32 tag_sel;
87 u32 fdq_sel[2];
88 u32 thresh[3];
89};
90
91struct knav_dma_pool_device {
92 struct device *dev;
93 struct list_head list;
94};
95
96struct knav_dma_device {
97 bool loopback, enable_all;
98 unsigned tx_priority, rx_priority, rx_timeout;
99 unsigned logical_queue_managers;
100 unsigned qm_base_address[DMA_MAX_QMS];
101 struct reg_global __iomem *reg_global;
102 struct reg_chan __iomem *reg_tx_chan;
103 struct reg_rx_flow __iomem *reg_rx_flow;
104 struct reg_chan __iomem *reg_rx_chan;
105 struct reg_tx_sched __iomem *reg_tx_sched;
106 unsigned max_rx_chan, max_tx_chan;
107 unsigned max_rx_flow;
108 char name[32];
109 atomic_t ref_count;
110 struct list_head list;
111 struct list_head chan_list;
112 spinlock_t lock;
113};
114
115struct knav_dma_chan {
116 enum dma_transfer_direction direction;
117 struct knav_dma_device *dma;
118 atomic_t ref_count;
119
120 /* registers */
121 struct reg_chan __iomem *reg_chan;
122 struct reg_tx_sched __iomem *reg_tx_sched;
123 struct reg_rx_flow __iomem *reg_rx_flow;
124
125 /* configuration stuff */
126 unsigned channel, flow;
127 struct knav_dma_cfg cfg;
128 struct list_head list;
129 spinlock_t lock;
130};
131
132#define chan_number(ch) ((ch->direction == DMA_MEM_TO_DEV) ? \
133 ch->channel : ch->flow)
134
135static struct knav_dma_pool_device *kdev;
136
137static bool check_config(struct knav_dma_chan *chan, struct knav_dma_cfg *cfg)
138{
139 if (!memcmp(&chan->cfg, cfg, sizeof(*cfg)))
140 return true;
141 else
142 return false;
143}
144
145static int chan_start(struct knav_dma_chan *chan,
146 struct knav_dma_cfg *cfg)
147{
148 u32 v = 0;
149
150 spin_lock(&chan->lock);
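	/* TX channel: program packet filter options, then enable the channel */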
151 if ((chan->direction == DMA_MEM_TO_DEV) && chan->reg_chan) {
152 if (cfg->u.tx.filt_pswords)
153 v |= DMA_TX_FILT_PSWORDS;
154 if (cfg->u.tx.filt_einfo)
155 v |= DMA_TX_FILT_EINFO;
156 writel_relaxed(v, &chan->reg_chan->mode);
157 writel_relaxed(DMA_ENABLE, &chan->reg_chan->control);
158 }
159
160 if (chan->reg_tx_sched)
161 writel_relaxed(cfg->u.tx.priority, &chan->reg_tx_sched->prio);
162
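	/*
	 * RX flow: program descriptor/PS-info handling, the destination queue
	 * and the free descriptor queues, and clear the flow thresholds.
	 */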
163 if (chan->reg_rx_flow) {
164 v = 0;
165
166 if (cfg->u.rx.einfo_present)
167 v |= CHAN_HAS_EPIB;
168 if (cfg->u.rx.psinfo_present)
169 v |= CHAN_HAS_PSINFO;
170 if (cfg->u.rx.err_mode == DMA_RETRY)
171 v |= CHAN_ERR_RETRY;
172 v |= (cfg->u.rx.desc_type & DESC_TYPE_MASK) << DESC_TYPE_SHIFT;
173 if (cfg->u.rx.psinfo_at_sop)
174 v |= CHAN_PSINFO_AT_SOP;
175 v |= (cfg->u.rx.sop_offset & CHAN_SOP_OFF_MASK)
176 << CHAN_SOP_OFF_SHIFT;
177 v |= cfg->u.rx.dst_q & CHAN_QNUM_MASK;
178
179 writel_relaxed(v, &chan->reg_rx_flow->control);
180 writel_relaxed(0, &chan->reg_rx_flow->tags);
181 writel_relaxed(0, &chan->reg_rx_flow->tag_sel);
182
183 v = cfg->u.rx.fdq[0] << 16;
184 v |= cfg->u.rx.fdq[1] & CHAN_QNUM_MASK;
185 writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[0]);
186
187 v = cfg->u.rx.fdq[2] << 16;
188 v |= cfg->u.rx.fdq[3] & CHAN_QNUM_MASK;
189 writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[1]);
190
191 writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
192 writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
193 writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
194 }
195
196 /* Keep a copy of the cfg */
197 memcpy(&chan->cfg, cfg, sizeof(*cfg));
198 spin_unlock(&chan->lock);
199
200 return 0;
201}
202
203static int chan_teardown(struct knav_dma_chan *chan)
204{
205 unsigned long end, value;
206
207 if (!chan->reg_chan)
208 return 0;
209
210 /* indicate teardown */
211 writel_relaxed(DMA_TEARDOWN, &chan->reg_chan->control);
212
213 /* wait for the dma to shut itself down */
214 end = jiffies + msecs_to_jiffies(DMA_TIMEOUT);
215 do {
216 value = readl_relaxed(&chan->reg_chan->control);
217 if ((value & DMA_ENABLE) == 0)
218 break;
219 } while (time_after(end, jiffies));
220
221 if (readl_relaxed(&chan->reg_chan->control) & DMA_ENABLE) {
222 dev_err(kdev->dev, "timeout waiting for teardown\n");
223 return -ETIMEDOUT;
224 }
225
226 return 0;
227}
228
229static void chan_stop(struct knav_dma_chan *chan)
230{
231 spin_lock(&chan->lock);
232 if (chan->reg_rx_flow) {
233 /* first detach fdqs, starve out the flow */
234 writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[0]);
235 writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[1]);
236 writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
237 writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
238 writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
239 }
240
241 /* teardown the dma channel */
242 chan_teardown(chan);
243
244 /* then disconnect the completion side */
245 if (chan->reg_rx_flow) {
246 writel_relaxed(0, &chan->reg_rx_flow->control);
247 writel_relaxed(0, &chan->reg_rx_flow->tags);
248 writel_relaxed(0, &chan->reg_rx_flow->tag_sel);
249 }
250
251 memset(&chan->cfg, 0, sizeof(struct knav_dma_cfg));
252 spin_unlock(&chan->lock);
253
254 dev_dbg(kdev->dev, "channel stopped\n");
255}
256
257static void dma_hw_enable_all(struct knav_dma_device *dma)
258{
259 int i;
260
261 for (i = 0; i < dma->max_tx_chan; i++) {
262 writel_relaxed(0, &dma->reg_tx_chan[i].mode);
263 writel_relaxed(DMA_ENABLE, &dma->reg_tx_chan[i].control);
264 }
265}
266
267
268static void knav_dma_hw_init(struct knav_dma_device *dma)
269{
270 unsigned v;
271 int i;
272
273 spin_lock(&dma->lock);
274 v = dma->loopback ? DMA_LOOPBACK : 0;
275 writel_relaxed(v, &dma->reg_global->emulation_control);
276
277 v = readl_relaxed(&dma->reg_global->perf_control);
278 v |= ((dma->rx_timeout & DMA_RX_TIMEOUT_MASK) << DMA_RX_TIMEOUT_SHIFT);
279 writel_relaxed(v, &dma->reg_global->perf_control);
280
281 v = ((dma->tx_priority << DMA_TX_PRIO_SHIFT) |
282 (dma->rx_priority << DMA_RX_PRIO_SHIFT));
283
284 writel_relaxed(v, &dma->reg_global->priority_control);
285
286 /* Always enable all Rx channels. Rx paths are managed using flows */
287 for (i = 0; i < dma->max_rx_chan; i++)
288 writel_relaxed(DMA_ENABLE, &dma->reg_rx_chan[i].control);
289
290 for (i = 0; i < dma->logical_queue_managers; i++)
291 writel_relaxed(dma->qm_base_address[i],
292 &dma->reg_global->qm_base_address[i]);
293 spin_unlock(&dma->lock);
294}
295
296static void knav_dma_hw_destroy(struct knav_dma_device *dma)
297{
298 int i;
299 unsigned v;
300
301 spin_lock(&dma->lock);
302 v = ~DMA_ENABLE & REG_MASK;
303
304 for (i = 0; i < dma->max_rx_chan; i++)
305 writel_relaxed(v, &dma->reg_rx_chan[i].control);
306
307 for (i = 0; i < dma->max_tx_chan; i++)
308 writel_relaxed(v, &dma->reg_tx_chan[i].control);
309 spin_unlock(&dma->lock);
310}
311
312static void dma_debug_show_channels(struct seq_file *s,
313 struct knav_dma_chan *chan)
314{
315 int i;
316
317 seq_printf(s, "\t%s %d:\t",
318 ((chan->direction == DMA_MEM_TO_DEV) ? "tx chan" : "rx flow"),
319 chan_number(chan));
320
321 if (chan->direction == DMA_MEM_TO_DEV) {
322 seq_printf(s, "einfo - %d, pswords - %d, priority - %d\n",
323 chan->cfg.u.tx.filt_einfo,
324 chan->cfg.u.tx.filt_pswords,
325 chan->cfg.u.tx.priority);
326 } else {
327 seq_printf(s, "einfo - %d, psinfo - %d, desc_type - %d\n",
328 chan->cfg.u.rx.einfo_present,
329 chan->cfg.u.rx.psinfo_present,
330 chan->cfg.u.rx.desc_type);
331 seq_printf(s, "\t\t\tdst_q: [%d], thresh: %d fdq: ",
332 chan->cfg.u.rx.dst_q,
333 chan->cfg.u.rx.thresh);
334 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++)
335 seq_printf(s, "[%d]", chan->cfg.u.rx.fdq[i]);
336 seq_printf(s, "\n");
337 }
338}
339
340static void dma_debug_show_devices(struct seq_file *s,
341 struct knav_dma_device *dma)
342{
343 struct knav_dma_chan *chan;
344
345 list_for_each_entry(chan, &dma->chan_list, list) {
346 if (atomic_read(&chan->ref_count))
347 dma_debug_show_channels(s, chan);
348 }
349}
350
351static int dma_debug_show(struct seq_file *s, void *v)
352{
353 struct knav_dma_device *dma;
354
355 list_for_each_entry(dma, &kdev->list, list) {
356 if (atomic_read(&dma->ref_count)) {
357 seq_printf(s, "%s : max_tx_chan: (%d), max_rx_flows: (%d)\n",
358 dma->name, dma->max_tx_chan, dma->max_rx_flow);
359 dma_debug_show_devices(s, dma);
360 }
361 }
362
363 return 0;
364}
365
366static int knav_dma_debug_open(struct inode *inode, struct file *file)
367{
368 return single_open(file, dma_debug_show, NULL);
369}
370
371static const struct file_operations knav_dma_debug_ops = {
372 .open = knav_dma_debug_open,
373 .read = seq_read,
374 .llseek = seq_lseek,
375 .release = single_release,
376};
377
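/*
 * Resolve a client channel/flow number from the device tree: match @name
 * against the client's "ti,navigator-dma-names" strings and return the
 * argument of the corresponding "ti,navigator-dmas" phandle entry.
 * *dma_instance is set to the name of the node referenced by the first
 * "ti,navigator-dmas" phandle.
 */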
378static int of_channel_match_helper(struct device_node *np, const char *name,
379 const char **dma_instance)
380{
381 struct of_phandle_args args;
382 struct device_node *dma_node;
383 int index;
384
385 dma_node = of_parse_phandle(np, "ti,navigator-dmas", 0);
386 if (!dma_node)
387 return -ENODEV;
388
389 *dma_instance = dma_node->name;
390 index = of_property_match_string(np, "ti,navigator-dma-names", name);
391 if (index < 0) {
392 dev_err(kdev->dev, "No 'ti,navigator-dma-names' property\n");
393 return -ENODEV;
394 }
395
396 if (of_parse_phandle_with_fixed_args(np, "ti,navigator-dmas",
397 1, index, &args)) {
398 dev_err(kdev->dev, "Missing the phandle args for %s\n", name);
399 return -ENODEV;
400 }
401
402 if (args.args[0] < 0) {
403 dev_err(kdev->dev, "Missing args for %s\n", name);
404 return -ENODEV;
405 }
406
407 return args.args[0];
408}
409
410/**
411 * knav_dma_open_channel() - try to setup an exclusive slave channel
412 * @dev: pointer to client device structure
413 * @name: slave channel name
414 * @config: dma configuration parameters
415 *
416 * Returns a pointer to the DMA channel on success, or an error-encoded pointer on failure.
417 */
418void *knav_dma_open_channel(struct device *dev, const char *name,
419 struct knav_dma_cfg *config)
420{
421 struct knav_dma_chan *chan;
422 struct knav_dma_device *dma;
423 bool found = false;
424 int chan_num = -1;
425 const char *instance;
426
427 if (!kdev) {
428 pr_err("keystone-navigator-dma driver not registered\n");
429 return (void *)-EINVAL;
430 }
431
432 chan_num = of_channel_match_helper(dev->of_node, name, &instance);
433 if (chan_num < 0) {
434 dev_err(kdev->dev, "No DMA instance with name %s\n", name);
435 return (void *)-EINVAL;
436 }
437
438 dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n",
439 config->direction == DMA_MEM_TO_DEV ? "transmit" :
440 config->direction == DMA_DEV_TO_MEM ? "receive" :
441 "unknown", chan_num, instance);
442
443 if (config->direction != DMA_MEM_TO_DEV &&
444 config->direction != DMA_DEV_TO_MEM) {
445 dev_err(kdev->dev, "bad direction\n");
446 return (void *)-EINVAL;
447 }
448
449 /* Look for correct dma instance */
450 list_for_each_entry(dma, &kdev->list, list) {
451 if (!strcmp(dma->name, instance)) {
452 found = true;
453 break;
454 }
455 }
456 if (!found) {
457 dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
458 return (void *)-EINVAL;
459 }
460
461 /* Look for correct dma channel from dma instance */
462 found = false;
463 list_for_each_entry(chan, &dma->chan_list, list) {
464 if (config->direction == DMA_MEM_TO_DEV) {
465 if (chan->channel == chan_num) {
466 found = true;
467 break;
468 }
469 } else {
470 if (chan->flow == chan_num) {
471 found = true;
472 break;
473 }
474 }
475 }
476 if (!found) {
477 dev_err(kdev->dev, "channel %d is not in DMA %s\n",
478 chan_num, instance);
479 return (void *)-EINVAL;
480 }
481
482 if (atomic_read(&chan->ref_count) >= 1) {
483 if (!check_config(chan, config)) {
484 dev_err(kdev->dev, "channel %d config mismatch\n",
485 chan_num);
486 return (void *)-EINVAL;
487 }
488 }
489
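	/*
	 * The first user of a DMA instance initializes the hardware; the
	 * first user of a channel programs and starts that channel.
	 */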
490 if (atomic_inc_return(&chan->dma->ref_count) <= 1)
491 knav_dma_hw_init(chan->dma);
492
493 if (atomic_inc_return(&chan->ref_count) <= 1)
494 chan_start(chan, config);
495
496 dev_dbg(kdev->dev, "channel %d opened from DMA %s\n",
497 chan_num, instance);
498
499 return chan;
500}
501EXPORT_SYMBOL_GPL(knav_dma_open_channel);
502
503/**
504 * knav_dma_close_channel() - Destroy a dma channel
505 *
506 * @channel: dma channel handle
507 *
508 */
509void knav_dma_close_channel(void *channel)
510{
511 struct knav_dma_chan *chan = channel;
512
513 if (!kdev) {
514 pr_err("keystone-navigator-dma driver not registered\n");
515 return;
516 }
517
518 if (atomic_dec_return(&chan->ref_count) <= 0)
519 chan_stop(chan);
520
521 if (atomic_dec_return(&chan->dma->ref_count) <= 0)
522 knav_dma_hw_destroy(chan->dma);
523
524 dev_dbg(kdev->dev, "channel %d or flow %d closed from DMA %s\n",
525 chan->channel, chan->flow, chan->dma->name);
526}
527EXPORT_SYMBOL_GPL(knav_dma_close_channel);
528
529static void __iomem *pktdma_get_regs(struct knav_dma_device *dma,
530 struct device_node *node,
531 unsigned index, resource_size_t *_size)
532{
533 struct device *dev = kdev->dev;
534 struct resource res;
535 void __iomem *regs;
536 int ret;
537
538 ret = of_address_to_resource(node, index, &res);
539 if (ret) {
540 dev_err(dev, "Can't translate of node(%s) address for index(%d)\n",
541 node->name, index);
542 return ERR_PTR(ret);
543 }
544
545 regs = devm_ioremap_resource(kdev->dev, &res);
546 if (IS_ERR(regs))
547 dev_err(dev, "Failed to map register base for index(%d) node(%s)\n",
548 index, node->name);
549 if (_size)
550 *_size = resource_size(&res);
551
552 return regs;
553}
554
555static int pktdma_init_rx_chan(struct knav_dma_chan *chan, u32 flow)
556{
557 struct knav_dma_device *dma = chan->dma;
558
559 chan->flow = flow;
560 chan->reg_rx_flow = dma->reg_rx_flow + flow;
561 chan->channel = DMA_INVALID_ID;
562 dev_dbg(kdev->dev, "rx flow(%d) (%p)\n", chan->flow, chan->reg_rx_flow);
563
564 return 0;
565}
566
567static int pktdma_init_tx_chan(struct knav_dma_chan *chan, u32 channel)
568{
569 struct knav_dma_device *dma = chan->dma;
570
571 chan->channel = channel;
572 chan->reg_chan = dma->reg_tx_chan + channel;
573 chan->reg_tx_sched = dma->reg_tx_sched + channel;
574 chan->flow = DMA_INVALID_ID;
575 dev_dbg(kdev->dev, "tx channel(%d) (%p)\n", chan->channel, chan->reg_chan);
576
577 return 0;
578}
579
580static int pktdma_init_chan(struct knav_dma_device *dma,
581 enum dma_transfer_direction dir,
582 unsigned chan_num)
583{
584 struct device *dev = kdev->dev;
585 struct knav_dma_chan *chan;
586 int ret = -EINVAL;
587
588 chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
589 if (!chan)
590 return -ENOMEM;
591
592 INIT_LIST_HEAD(&chan->list);
593 chan->dma = dma;
594 chan->direction = DMA_NONE;
595 atomic_set(&chan->ref_count, 0);
596 spin_lock_init(&chan->lock);
597
598 if (dir == DMA_MEM_TO_DEV) {
599 chan->direction = dir;
600 ret = pktdma_init_tx_chan(chan, chan_num);
601 } else if (dir == DMA_DEV_TO_MEM) {
602 chan->direction = dir;
603 ret = pktdma_init_rx_chan(chan, chan_num);
604 } else {
605 dev_err(dev, "channel(%d) direction unknown\n", chan_num);
606 }
607
608 list_add_tail(&chan->list, &dma->chan_list);
609
610 return ret;
611}
612
613static int dma_init(struct device_node *cloud, struct device_node *dma_node)
614{
615 unsigned max_tx_chan, max_rx_chan, max_rx_flow, max_tx_sched;
616 struct device_node *node = dma_node;
617 struct knav_dma_device *dma;
618 int ret, len, num_chan = 0;
619 resource_size_t size;
620 u32 timeout;
621 u32 i;
622
623 dma = devm_kzalloc(kdev->dev, sizeof(*dma), GFP_KERNEL);
624 if (!dma) {
625 dev_err(kdev->dev, "could not allocate driver mem\n");
626 return -ENOMEM;
627 }
628 INIT_LIST_HEAD(&dma->list);
629 INIT_LIST_HEAD(&dma->chan_list);
630
631 if (!of_find_property(cloud, "ti,navigator-cloud-address", &len)) {
632 dev_err(kdev->dev, "unspecified navigator cloud addresses\n");
633 return -ENODEV;
634 }
635
636 dma->logical_queue_managers = len / sizeof(u32);
637 if (dma->logical_queue_managers > DMA_MAX_QMS) {
638 dev_warn(kdev->dev, "too many queue mgrs (>%d), rest ignored\n",
639 dma->logical_queue_managers);
640 dma->logical_queue_managers = DMA_MAX_QMS;
641 }
642
643 ret = of_property_read_u32_array(cloud, "ti,navigator-cloud-address",
644 dma->qm_base_address,
645 dma->logical_queue_managers);
646 if (ret) {
647 dev_err(kdev->dev, "invalid navigator cloud addresses\n");
648 return -ENODEV;
649 }
650
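	/*
	 * Map the register regions in the order they appear in the DT node:
	 * global config, tx channel, rx channel, tx scheduler and rx flow.
	 */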
651 dma->reg_global = pktdma_get_regs(dma, node, 0, &size);
652 if (!dma->reg_global)
653 return -ENODEV;
654 if (size < sizeof(struct reg_global)) {
655 dev_err(kdev->dev, "bad size %pa for global regs\n", &size);
656 return -ENODEV;
657 }
658
659 dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size);
660 if (!dma->reg_tx_chan)
661 return -ENODEV;
662
663 max_tx_chan = size / sizeof(struct reg_chan);
664 dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size);
665 if (!dma->reg_rx_chan)
666 return -ENODEV;
667
668 max_rx_chan = size / sizeof(struct reg_chan);
669 dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size);
670 if (!dma->reg_tx_sched)
671 return -ENODEV;
672
673 max_tx_sched = size / sizeof(struct reg_tx_sched);
674 dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size);
675 if (!dma->reg_rx_flow)
676 return -ENODEV;
677
678 max_rx_flow = size / sizeof(struct reg_rx_flow);
679 dma->rx_priority = DMA_PRIO_DEFAULT;
680 dma->tx_priority = DMA_PRIO_DEFAULT;
681
682 dma->enable_all = (of_get_property(node, "ti,enable-all", NULL) != NULL);
683 dma->loopback = (of_get_property(node, "ti,loop-back", NULL) != NULL);
684
685 ret = of_property_read_u32(node, "ti,rx-retry-timeout", &timeout);
686 if (ret < 0) {
687 dev_dbg(kdev->dev, "unspecified rx timeout, using default %d\n",
688 DMA_RX_TIMEOUT_DEFAULT);
689 timeout = DMA_RX_TIMEOUT_DEFAULT;
690 }
691
692 dma->rx_timeout = timeout;
693 dma->max_rx_chan = max_rx_chan;
694 dma->max_rx_flow = max_rx_flow;
695 dma->max_tx_chan = min(max_tx_chan, max_tx_sched);
696 atomic_set(&dma->ref_count, 0);
697 strcpy(dma->name, node->name);
698 spin_lock_init(&dma->lock);
699
700 for (i = 0; i < dma->max_tx_chan; i++) {
701 if (pktdma_init_chan(dma, DMA_MEM_TO_DEV, i) >= 0)
702 num_chan++;
703 }
704
705 for (i = 0; i < dma->max_rx_flow; i++) {
706 if (pktdma_init_chan(dma, DMA_DEV_TO_MEM, i) >= 0)
707 num_chan++;
708 }
709
710 list_add_tail(&dma->list, &kdev->list);
711
712 /*
713 * For DSP software use cases or userspace transport software, set up all
714 * the DMA hardware resources.
715 */
716 if (dma->enable_all) {
717 atomic_inc(&dma->ref_count);
718 knav_dma_hw_init(dma);
719 dma_hw_enable_all(dma);
720 }
721
722 dev_info(kdev->dev, "DMA %s registered %d logical channels, flows %d, tx chans: %d, rx chans: %d%s\n",
723 dma->name, num_chan, dma->max_rx_flow,
724 dma->max_tx_chan, dma->max_rx_chan,
725 dma->loopback ? ", loopback" : "");
726
727 return 0;
728}
729
730static int knav_dma_probe(struct platform_device *pdev)
731{
732 struct device *dev = &pdev->dev;
733 struct device_node *node = pdev->dev.of_node;
734 struct device_node *child;
735 int ret = 0;
736
737 if (!node) {
738 dev_err(&pdev->dev, "could not find device info\n");
739 return -EINVAL;
740 }
741
742 kdev = devm_kzalloc(dev,
743 sizeof(struct knav_dma_pool_device), GFP_KERNEL);
744 if (!kdev) {
745 dev_err(dev, "could not allocate driver mem\n");
746 return -ENOMEM;
747 }
748
749 kdev->dev = dev;
750 INIT_LIST_HEAD(&kdev->list);
751
752 pm_runtime_enable(kdev->dev);
753 ret = pm_runtime_get_sync(kdev->dev);
754 if (ret < 0) {
755 dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
756 return ret;
757 }
758
759 /* Initialise all packet dmas */
760 for_each_child_of_node(node, child) {
761 ret = dma_init(node, child);
762 if (ret) {
763 dev_err(&pdev->dev, "init failed with %d\n", ret);
764 break;
765 }
766 }
767
768 if (list_empty(&kdev->list)) {
769 dev_err(dev, "no valid dma instance\n");
770 return -ENODEV;
771 }
772
773 debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
774 &knav_dma_debug_ops);
775
776 return ret;
777}
778
779static int knav_dma_remove(struct platform_device *pdev)
780{
781 struct knav_dma_device *dma;
782
783 list_for_each_entry(dma, &kdev->list, list) {
784 if (atomic_dec_return(&dma->ref_count) == 0)
785 knav_dma_hw_destroy(dma);
786 }
787
788 pm_runtime_put_sync(&pdev->dev);
789 pm_runtime_disable(&pdev->dev);
790
791 return 0;
792}
793
794static struct of_device_id of_match[] = {
795 { .compatible = "ti,keystone-navigator-dma", },
796 {},
797};
798
799MODULE_DEVICE_TABLE(of, of_match);
800
801static struct platform_driver knav_dma_driver = {
802 .probe = knav_dma_probe,
803 .remove = knav_dma_remove,
804 .driver = {
805 .name = "keystone-navigator-dma",
806 .owner = THIS_MODULE,
807 .of_match_table = of_match,
808 },
809};
810module_platform_driver(knav_dma_driver);
811
812MODULE_LICENSE("GPL v2");
813MODULE_DESCRIPTION("TI Keystone Navigator Packet DMA driver");
814MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
815MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");