aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAndy Shevchenko <andriy.shevchenko@linux.intel.com>2015-02-23 09:24:42 -0500
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2015-03-06 21:23:02 -0500
commit2b49e0c56741fca538176f66ed3c8d16ce4fccd8 (patch)
treed2246e177136d99847de6b2da31ded7efa84e2f0
parent84e0185efaf8de931e1aab0687d8f8acd186a1c0 (diff)
dmaengine: append hsu DMA driver
The HSU DMA is developed to support High Speed UART controllers found in particular on Intel MID platforms such as Intel Medfield. The existing implementation is tightly coupled to the drivers/tty/serial/mfd.c driver and has a lot of disadvantages. Besides that, we would like to get rid of the old HS UART driver with regard to extending the 8250 driver, which supports the generic DMAEngine API. That's why the current driver has been developed. Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/hsu/Kconfig14
-rw-r--r--drivers/dma/hsu/Makefile5
-rw-r--r--drivers/dma/hsu/hsu.c504
-rw-r--r--drivers/dma/hsu/hsu.h118
-rw-r--r--drivers/dma/hsu/pci.c123
-rw-r--r--include/linux/dma/hsu.h48
-rw-r--r--include/linux/platform_data/dma-hsu.h25
9 files changed, 840 insertions, 0 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a874b6ec6650..074ffad334a7 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -125,6 +125,8 @@ config FSL_DMA
125 EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on 125 EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on
126 some Txxx and Bxxx parts. 126 some Txxx and Bxxx parts.
127 127
128source "drivers/dma/hsu/Kconfig"
129
128config MPC512X_DMA 130config MPC512X_DMA
129 tristate "Freescale MPC512x built-in DMA engine support" 131 tristate "Freescale MPC512x built-in DMA engine support"
130 depends on PPC_MPC512x || PPC_MPC831x 132 depends on PPC_MPC512x || PPC_MPC831x
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index f915f61ec574..bf4485800c60 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_DMATEST) += dmatest.o
11obj-$(CONFIG_INTEL_IOATDMA) += ioat/ 11obj-$(CONFIG_INTEL_IOATDMA) += ioat/
12obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o 12obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
13obj-$(CONFIG_FSL_DMA) += fsldma.o 13obj-$(CONFIG_FSL_DMA) += fsldma.o
14obj-$(CONFIG_HSU_DMA) += hsu/
14obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o 15obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
15obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ 16obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
16obj-$(CONFIG_MV_XOR) += mv_xor.o 17obj-$(CONFIG_MV_XOR) += mv_xor.o
diff --git a/drivers/dma/hsu/Kconfig b/drivers/dma/hsu/Kconfig
new file mode 100644
index 000000000000..7e98eff7440e
--- /dev/null
+++ b/drivers/dma/hsu/Kconfig
@@ -0,0 +1,14 @@
1# DMA engine configuration for hsu
2config HSU_DMA
3 tristate "High Speed UART DMA support"
4 select DMA_ENGINE
5 select DMA_VIRTUAL_CHANNELS
6
7config HSU_DMA_PCI
8 tristate "High Speed UART DMA PCI driver"
9 depends on PCI
10 select HSU_DMA
11 help
12	  Support the High Speed UART DMA on the platforms that
13 enumerate it as a PCI device. For example, Intel Medfield
14 has integrated this HSU DMA controller.
diff --git a/drivers/dma/hsu/Makefile b/drivers/dma/hsu/Makefile
new file mode 100644
index 000000000000..b8f9af032ef1
--- /dev/null
+++ b/drivers/dma/hsu/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_HSU_DMA) += hsu_dma.o
2hsu_dma-objs := hsu.o
3
4obj-$(CONFIG_HSU_DMA_PCI) += hsu_dma_pci.o
5hsu_dma_pci-objs := pci.o
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
new file mode 100644
index 000000000000..683ba9b62795
--- /dev/null
+++ b/drivers/dma/hsu/hsu.c
@@ -0,0 +1,504 @@
1/*
2 * Core driver for the High Speed UART DMA
3 *
4 * Copyright (C) 2015 Intel Corporation
5 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
6 *
7 * Partially based on the bits found in drivers/tty/serial/mfd.c.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14/*
15 * DMA channel allocation:
16 * 1. Even number chans are used for DMA Read (UART TX), odd chans for DMA
17 * Write (UART RX).
18 * 2. 0/1 channel are assigned to port 0, 2/3 chan to port 1, 4/5 chan to
19 *    port 2, and so on.
20 */
21
22#include <linux/delay.h>
23#include <linux/dmaengine.h>
24#include <linux/dma-mapping.h>
25#include <linux/init.h>
26#include <linux/module.h>
27#include <linux/slab.h>
28
29#include "hsu.h"
30
31#define HSU_DMA_BUSWIDTHS \
32 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
33 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
34 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
35 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
36 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
37 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) | \
38 BIT(DMA_SLAVE_BUSWIDTH_16_BYTES)
39
/* Halt the channel by clearing the whole control register (drops CHA). */
static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc)
{
	hsu_chan_writel(hsuc, HSU_CH_CR, 0);
}
44
/*
 * Activate the channel: set CHA and encode the transfer direction into the
 * CHD bit (cleared for DMA_MEM_TO_DEV, set for DMA_DEV_TO_MEM).  Called
 * under hsuc->lock by the activate/start paths.
 */
static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc)
{
	u32 cr = HSU_CH_CR_CHA;

	if (hsuc->direction == DMA_MEM_TO_DEV)
		cr &= ~HSU_CH_CR_CHD;
	else if (hsuc->direction == DMA_DEV_TO_MEM)
		cr |= HSU_CH_CR_CHD;

	hsu_chan_writel(hsuc, HSU_CH_CR, cr);
}
56
57static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
58{
59 struct dma_slave_config *config = &hsuc->config;
60 struct hsu_dma_desc *desc = hsuc->desc;
61 u32 bsr, mtsr;
62 u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI;
63 unsigned int i, count;
64
65 if (hsuc->direction == DMA_MEM_TO_DEV) {
66 bsr = config->dst_maxburst;
67 mtsr = config->dst_addr_width;
68 } else if (hsuc->direction == DMA_DEV_TO_MEM) {
69 bsr = config->src_maxburst;
70 mtsr = config->src_addr_width;
71 } else {
72 /* Not supported direction */
73 return;
74 }
75
76 hsu_chan_disable(hsuc);
77
78 hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
79 hsu_chan_writel(hsuc, HSU_CH_BSR, bsr);
80 hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);
81
82 /* Set descriptors */
83 count = (desc->nents - desc->active) % HSU_DMA_CHAN_NR_DESC;
84 for (i = 0; i < count; i++) {
85 hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
86 hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);
87
88 /* Prepare value for DCR */
89 dcr |= HSU_CH_DCR_DESCA(i);
90 dcr |= HSU_CH_DCR_CHTOI(i); /* timeout bit, see HSU Errata 1 */
91
92 desc->active++;
93 }
94 /* Only for the last descriptor in the chain */
95 dcr |= HSU_CH_DCR_CHSOD(count - 1);
96 dcr |= HSU_CH_DCR_CHDI(count - 1);
97
98 hsu_chan_writel(hsuc, HSU_CH_DCR, dcr);
99
100 hsu_chan_enable(hsuc);
101}
102
103static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
104{
105 unsigned long flags;
106
107 spin_lock_irqsave(&hsuc->lock, flags);
108 hsu_chan_disable(hsuc);
109 hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
110 spin_unlock_irqrestore(&hsuc->lock, flags);
111}
112
113static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
114{
115 unsigned long flags;
116
117 spin_lock_irqsave(&hsuc->lock, flags);
118 hsu_dma_chan_start(hsuc);
119 spin_unlock_irqrestore(&hsuc->lock, flags);
120}
121
122static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
123{
124 struct virt_dma_desc *vdesc;
125
126 /* Get the next descriptor */
127 vdesc = vchan_next_desc(&hsuc->vchan);
128 if (!vdesc) {
129 hsuc->desc = NULL;
130 return;
131 }
132
133 list_del(&vdesc->node);
134 hsuc->desc = to_hsu_dma_desc(vdesc);
135
136 /* Start the channel with a new descriptor */
137 hsu_dma_start_channel(hsuc);
138}
139
140static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
141{
142 unsigned long flags;
143 u32 sr;
144
145 spin_lock_irqsave(&hsuc->lock, flags);
146 sr = hsu_chan_readl(hsuc, HSU_CH_SR);
147 spin_unlock_irqrestore(&hsuc->lock, flags);
148
149 return sr;
150}
151
/*
 * hsu_dma_irq() - handle an interrupt for one DMA channel
 * @chip: HSU DMA controller instance
 * @nr: channel number as demultiplexed by the enumeration driver
 *
 * Exported so the platform glue (e.g. the PCI driver) can forward its
 * shared controller interrupt per channel.
 *
 * Return: IRQ_NONE when @nr is out of range or no status bit is set,
 * IRQ_HANDLED otherwise.
 */
irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
{
	struct hsu_dma_chan *hsuc;
	struct hsu_dma_desc *desc;
	unsigned long flags;
	u32 sr;

	/* Sanity check */
	if (nr >= chip->pdata->nr_channels)
		return IRQ_NONE;

	hsuc = &chip->hsu->chan[nr];

	/*
	 * No matter what situation, need read clear the IRQ status
	 * There is a bug, see Errata 5, HSD 2900918
	 */
	sr = hsu_dma_chan_get_sr(hsuc);
	if (!sr)
		return IRQ_NONE;

	/* Timeout IRQ, need wait some time, see Errata 2 */
	if (hsuc->direction == DMA_DEV_TO_MEM && (sr & HSU_CH_SR_DESCTO_ANY))
		udelay(2);

	/* Ignore a pure timeout interrupt once the delay has been applied */
	sr &= ~HSU_CH_SR_DESCTO_ANY;
	if (!sr)
		return IRQ_HANDLED;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	desc = hsuc->desc;
	if (desc) {
		if (sr & HSU_CH_SR_CHE) {
			/* Channel error: surface it via the cookie status */
			desc->status = DMA_ERROR;
		} else if (desc->active < desc->nents) {
			/* More sg entries left: program the next HW window */
			hsu_dma_start_channel(hsuc);
		} else {
			/* Whole descriptor done: complete it and move on */
			vchan_cookie_complete(&desc->vdesc);
			desc->status = DMA_COMPLETE;
			hsu_dma_start_transfer(hsuc);
		}
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return IRQ_HANDLED;
}
199
200static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
201{
202 struct hsu_dma_desc *desc;
203
204 desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
205 if (!desc)
206 return NULL;
207
208 desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_ATOMIC);
209 if (!desc->sg) {
210 kfree(desc);
211 return NULL;
212 }
213
214 return desc;
215}
216
/* virt-dma desc_free callback: release the sg table, then the descriptor. */
static void hsu_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc);

	kfree(desc->sg);
	kfree(desc);
}
224
/*
 * dmaengine ->device_prep_slave_sg() callback: snapshot the mapped
 * scatterlist (DMA addresses and lengths) into a software descriptor and
 * queue it on the virtual channel.  Returns NULL on allocation failure.
 */
static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	struct hsu_dma_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = hsu_dma_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	/* Copy the DMA address/length of each mapped sg entry */
	for_each_sg(sgl, sg, sg_len, i) {
		desc->sg[i].addr = sg_dma_address(sg);
		desc->sg[i].len = sg_dma_len(sg);
	}

	desc->nents = sg_len;
	desc->direction = direction;
	desc->active = 0;	/* no entries submitted to hardware yet */
	desc->status = DMA_IN_PROGRESS;

	return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
}
251
252static void hsu_dma_issue_pending(struct dma_chan *chan)
253{
254 struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
255 unsigned long flags;
256
257 spin_lock_irqsave(&hsuc->vchan.lock, flags);
258 if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)
259 hsu_dma_start_transfer(hsuc);
260 spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
261}
262
263static size_t hsu_dma_desc_size(struct hsu_dma_desc *desc)
264{
265 size_t bytes = 0;
266 unsigned int i;
267
268 for (i = desc->active; i < desc->nents; i++)
269 bytes += desc->sg[i].len;
270
271 return bytes;
272}
273
/*
 * Residue of the descriptor currently on the hardware: bytes of the
 * not-yet-submitted sg entries plus whatever the hardware still reports
 * in the DxTSR transfer-size registers of the submitted window.
 * Called from hsu_dma_tx_status() with vchan.lock held.
 */
static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
	struct hsu_dma_desc *desc = hsuc->desc;
	size_t bytes = hsu_dma_desc_size(desc);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&hsuc->lock, flags);
	/*
	 * Walk the hardware descriptors of the current window down to
	 * index 0.  NOTE(review): when desc->active is an exact multiple
	 * of HSU_DMA_CHAN_NR_DESC only DxTSR(0) is read here — confirm
	 * this matches the hardware window layout.
	 */
	i = desc->active % HSU_DMA_CHAN_NR_DESC;
	do {
		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
	} while (--i >= 0);
	spin_unlock_irqrestore(&hsuc->lock, flags);

	return bytes;
}
290
/*
 * dmaengine ->device_tx_status() callback.
 *
 * For the descriptor currently on the hardware the residue includes the
 * live DxTSR register counts; for a descriptor still sitting in the
 * virtual-channel queue it is the full descriptor size.
 */
static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	size_t bytes;
	unsigned long flags;

	/* Fast path: the cookie framework already knows it is complete */
	status = dma_cookie_status(chan, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	vdesc = vchan_find_desc(&hsuc->vchan, cookie);
	if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) {
		/* Active descriptor: ask the hardware for the residue */
		bytes = hsu_dma_active_desc_size(hsuc);
		dma_set_residue(state, bytes);
		status = hsuc->desc->status;
	} else if (vdesc) {
		/* Still queued: nothing transferred yet */
		bytes = hsu_dma_desc_size(to_hsu_dma_desc(vdesc));
		dma_set_residue(state, bytes);
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return status;
}
318
319static int hsu_dma_slave_config(struct dma_chan *chan,
320 struct dma_slave_config *config)
321{
322 struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
323
324 /* Check if chan will be configured for slave transfers */
325 if (!is_slave_direction(config->direction))
326 return -EINVAL;
327
328 memcpy(&hsuc->config, config, sizeof(hsuc->config));
329
330 return 0;
331}
332
333static void hsu_dma_chan_deactivate(struct hsu_dma_chan *hsuc)
334{
335 unsigned long flags;
336
337 spin_lock_irqsave(&hsuc->lock, flags);
338 hsu_chan_disable(hsuc);
339 spin_unlock_irqrestore(&hsuc->lock, flags);
340}
341
342static void hsu_dma_chan_activate(struct hsu_dma_chan *hsuc)
343{
344 unsigned long flags;
345
346 spin_lock_irqsave(&hsuc->lock, flags);
347 hsu_chan_enable(hsuc);
348 spin_unlock_irqrestore(&hsuc->lock, flags);
349}
350
351static int hsu_dma_pause(struct dma_chan *chan)
352{
353 struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
354 unsigned long flags;
355
356 spin_lock_irqsave(&hsuc->vchan.lock, flags);
357 if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
358 hsu_dma_chan_deactivate(hsuc);
359 hsuc->desc->status = DMA_PAUSED;
360 }
361 spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
362
363 return 0;
364}
365
366static int hsu_dma_resume(struct dma_chan *chan)
367{
368 struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
369 unsigned long flags;
370
371 spin_lock_irqsave(&hsuc->vchan.lock, flags);
372 if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
373 hsuc->desc->status = DMA_IN_PROGRESS;
374 hsu_dma_chan_activate(hsuc);
375 }
376 spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
377
378 return 0;
379}
380
381static int hsu_dma_terminate_all(struct dma_chan *chan)
382{
383 struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
384 unsigned long flags;
385 LIST_HEAD(head);
386
387 spin_lock_irqsave(&hsuc->vchan.lock, flags);
388
389 hsu_dma_stop_channel(hsuc);
390 hsuc->desc = NULL;
391
392 vchan_get_all_descriptors(&hsuc->vchan, &head);
393 spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
394 vchan_dma_desc_free_list(&hsuc->vchan, &head);
395
396 return 0;
397}
398
/* No per-channel resources to set up; the callback only needs to exist. */
static int hsu_dma_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}
403
/* Release any descriptors still held by the virtual channel. */
static void hsu_dma_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}
408
/*
 * hsu_dma_probe() - initialize and register one HSU DMA controller
 * @chip: chip description filled in by the enumeration driver (dev, regs,
 *        length, offset, and optionally pdata must be set by the caller)
 *
 * Allocates the hsu_dma instance and its channels (devm-managed), wires up
 * the dmaengine callbacks and registers the DMA device.  When no platform
 * data is supplied, the channel count is derived from the I/O space length.
 *
 * Return: 0 on success or a negative errno.
 */
int hsu_dma_probe(struct hsu_dma_chip *chip)
{
	struct hsu_dma *hsu;
	struct hsu_dma_platform_data *pdata = chip->pdata;
	void __iomem *addr = chip->regs + chip->offset;
	unsigned short i;
	int ret;

	hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL);
	if (!hsu)
		return -ENOMEM;

	chip->hsu = hsu;

	if (!pdata) {
		pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
		if (!pdata)
			return -ENOMEM;

		chip->pdata = pdata;

		/* Guess nr_channels from the IO space length */
		pdata->nr_channels = (chip->length - chip->offset) /
				     HSU_DMA_CHAN_LENGTH;
	}

	hsu->chan = devm_kcalloc(chip->dev, pdata->nr_channels,
				 sizeof(*hsu->chan), GFP_KERNEL);
	if (!hsu->chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&hsu->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		hsuc->vchan.desc_free = hsu_dma_desc_free;
		vchan_init(&hsuc->vchan, &hsu->dma);

		/* Even channels read memory (UART TX), odd ones write it */
		hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
		hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;

		spin_lock_init(&hsuc->lock);
	}

	dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask);

	hsu->dma.device_alloc_chan_resources = hsu_dma_alloc_chan_resources;
	hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources;

	hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg;

	hsu->dma.device_issue_pending = hsu_dma_issue_pending;
	hsu->dma.device_tx_status = hsu_dma_tx_status;

	hsu->dma.device_config = hsu_dma_slave_config;
	hsu->dma.device_pause = hsu_dma_pause;
	hsu->dma.device_resume = hsu_dma_resume;
	hsu->dma.device_terminate_all = hsu_dma_terminate_all;

	hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	hsu->dma.dev = chip->dev;

	ret = dma_async_device_register(&hsu->dma);
	if (ret)
		return ret;

	dev_info(chip->dev, "Found HSU DMA, %d channels\n", pdata->nr_channels);
	return 0;
}
484
/*
 * hsu_dma_remove() - unregister the controller set up by hsu_dma_probe()
 * @chip: chip description previously passed to hsu_dma_probe()
 *
 * Unregisters the DMA device and kills each channel's virt-dma tasklet.
 * Memory is devm-managed and released by the driver core.  Always
 * returns 0.
 */
int hsu_dma_remove(struct hsu_dma_chip *chip)
{
	struct hsu_dma *hsu = chip->hsu;
	unsigned short i;

	dma_async_device_unregister(&hsu->dma);

	for (i = 0; i < chip->pdata->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		tasklet_kill(&hsuc->vchan.task);
	}

	return 0;
}
501
502MODULE_LICENSE("GPL v2");
503MODULE_DESCRIPTION("High Speed UART DMA core driver");
504MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
new file mode 100644
index 000000000000..0275233cf550
--- /dev/null
+++ b/drivers/dma/hsu/hsu.h
@@ -0,0 +1,118 @@
1/*
2 * Driver for the High Speed UART DMA
3 *
4 * Copyright (C) 2015 Intel Corporation
5 *
6 * Partially based on the bits found in drivers/tty/serial/mfd.c.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef __DMA_HSU_H__
14#define __DMA_HSU_H__
15
16#include <linux/spinlock.h>
17#include <linux/dma/hsu.h>
18
19#include "../virt-dma.h"
20
/* Per-channel register offsets (within one HSU_DMA_CHAN_LENGTH window) */
#define HSU_CH_SR		0x00			/* channel status */
#define HSU_CH_CR		0x04			/* channel control */
#define HSU_CH_DCR		0x08			/* descriptor control */
#define HSU_CH_BSR		0x10			/* FIFO buffer size */
#define HSU_CH_MTSR		0x14			/* minimum transfer size */
#define HSU_CH_DxSAR(x)		(0x20 + 8 * (x))	/* desc start addr */
#define HSU_CH_DxTSR(x)		(0x24 + 8 * (x))	/* desc transfer size */
#define HSU_CH_D0SAR		0x20			/* desc 0 start addr */
#define HSU_CH_D0TSR		0x24			/* desc 0 transfer size */
#define HSU_CH_D1SAR		0x28
#define HSU_CH_D1TSR		0x2c
#define HSU_CH_D2SAR		0x30
#define HSU_CH_D2TSR		0x34
#define HSU_CH_D3SAR		0x38
#define HSU_CH_D3TSR		0x3c

/* Each channel exposes four HW descriptors in a 0x40-byte register window */
#define HSU_DMA_CHAN_NR_DESC	4
#define HSU_DMA_CHAN_LENGTH	0x40

/* Bits in HSU_CH_SR */
#define HSU_CH_SR_DESCTO(x)	BIT(8 + (x))
#define HSU_CH_SR_DESCTO_ANY	(BIT(11) | BIT(10) | BIT(9) | BIT(8))
#define HSU_CH_SR_CHE		BIT(15)

/* Bits in HSU_CH_CR */
#define HSU_CH_CR_CHA		BIT(0)
#define HSU_CH_CR_CHD		BIT(1)

/* Bits in HSU_CH_DCR */
#define HSU_CH_DCR_DESCA(x)	BIT(0 + (x))
#define HSU_CH_DCR_CHSOD(x)	BIT(8 + (x))
#define HSU_CH_DCR_CHSOTO	BIT(14)
#define HSU_CH_DCR_CHSOE	BIT(15)
#define HSU_CH_DCR_CHDI(x)	BIT(16 + (x))
#define HSU_CH_DCR_CHEI		BIT(23)
#define HSU_CH_DCR_CHTOI(x)	BIT(24 + (x))

/* One mapped scatterlist entry: DMA address and length in bytes */
struct hsu_dma_sg {
	dma_addr_t addr;
	unsigned int len;
};

/* Software transfer descriptor wrapping a virt-dma descriptor */
struct hsu_dma_desc {
	struct virt_dma_desc vdesc;
	enum dma_transfer_direction direction;
	struct hsu_dma_sg *sg;		/* array of nents entries */
	unsigned int nents;
	unsigned int active;		/* entries already submitted to HW */
	enum dma_status status;
};

static inline struct hsu_dma_desc *to_hsu_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct hsu_dma_desc, vdesc);
}

/* One HSU DMA channel: a virt-dma channel plus its register window */
struct hsu_dma_chan {
	struct virt_dma_chan vchan;

	void __iomem *reg;	/* base of this channel's register window */
	spinlock_t lock;	/* serializes register access */

	/* hardware configuration */
	enum dma_transfer_direction direction;
	struct dma_slave_config config;

	struct hsu_dma_desc *desc;	/* descriptor currently on the HW */
};

static inline struct hsu_dma_chan *to_hsu_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct hsu_dma_chan, vchan.chan);
}

static inline u32 hsu_chan_readl(struct hsu_dma_chan *hsuc, int offset)
{
	return readl(hsuc->reg + offset);
}

static inline void hsu_chan_writel(struct hsu_dma_chan *hsuc, int offset,
				   u32 value)
{
	writel(value, hsuc->reg + offset);
}

/* The controller: a dma_device plus its channel array */
struct hsu_dma {
	struct dma_device dma;

	/* channels */
	struct hsu_dma_chan *chan;
};

static inline struct hsu_dma *to_hsu_dma(struct dma_device *ddev)
{
	return container_of(ddev, struct hsu_dma, dma);
}
117
118#endif /* __DMA_HSU_H__ */
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
new file mode 100644
index 000000000000..563b4685d766
--- /dev/null
+++ b/drivers/dma/hsu/pci.c
@@ -0,0 +1,123 @@
1/*
2 * PCI driver for the High Speed UART DMA
3 *
4 * Copyright (C) 2015 Intel Corporation
5 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
6 *
7 * Partially based on the bits found in drivers/tty/serial/mfd.c.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/bitops.h>
15#include <linux/device.h>
16#include <linux/module.h>
17#include <linux/pci.h>
18
19#include "hsu.h"
20
21#define HSU_PCI_DMASR 0x00
22#define HSU_PCI_DMAISR 0x04
23
24#define HSU_PCI_CHAN_OFFSET 0x100
25
/*
 * Top-level interrupt handler: DMAISR carries one status bit per channel;
 * forward each asserted bit to the core's hsu_dma_irq() and combine the
 * results (IRQ_NONE if no channel claimed the interrupt).
 */
static irqreturn_t hsu_pci_irq(int irq, void *dev)
{
	struct hsu_dma_chip *chip = dev;
	u32 dmaisr;
	unsigned short i;
	irqreturn_t ret = IRQ_NONE;

	dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
	for (i = 0; i < chip->pdata->nr_channels; i++) {
		if (dmaisr & 0x1)
			ret |= hsu_dma_irq(chip, i);
		dmaisr >>= 1;
	}

	return ret;
}
42
43static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
44{
45 struct hsu_dma_chip *chip;
46 int ret;
47
48 ret = pcim_enable_device(pdev);
49 if (ret)
50 return ret;
51
52 ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
53 if (ret) {
54 dev_err(&pdev->dev, "I/O memory remapping failed\n");
55 return ret;
56 }
57
58 pci_set_master(pdev);
59 pci_try_set_mwi(pdev);
60
61 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
62 if (ret)
63 return ret;
64
65 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
66 if (ret)
67 return ret;
68
69 chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
70 if (!chip)
71 return -ENOMEM;
72
73 chip->dev = &pdev->dev;
74 chip->regs = pcim_iomap_table(pdev)[0];
75 chip->length = pci_resource_len(pdev, 0);
76 chip->offset = HSU_PCI_CHAN_OFFSET;
77 chip->irq = pdev->irq;
78
79 pci_enable_msi(pdev);
80
81 ret = hsu_dma_probe(chip);
82 if (ret)
83 return ret;
84
85 ret = request_irq(chip->irq, hsu_pci_irq, 0, "hsu_dma_pci", chip);
86 if (ret)
87 goto err_register_irq;
88
89 pci_set_drvdata(pdev, chip);
90
91 return 0;
92
93err_register_irq:
94 hsu_dma_remove(chip);
95 return ret;
96}
97
/* Release the interrupt first so hsu_dma_remove() can kill the tasklets. */
static void hsu_pci_remove(struct pci_dev *pdev)
{
	struct hsu_dma_chip *chip = pci_get_drvdata(pdev);

	free_irq(chip->irq, chip);
	hsu_dma_remove(chip);
}
105
106static const struct pci_device_id hsu_pci_id_table[] = {
107 { PCI_VDEVICE(INTEL, 0x081e), 0 },
108 { }
109};
110MODULE_DEVICE_TABLE(pci, hsu_pci_id_table);
111
112static struct pci_driver hsu_pci_driver = {
113 .name = "hsu_dma_pci",
114 .id_table = hsu_pci_id_table,
115 .probe = hsu_pci_probe,
116 .remove = hsu_pci_remove,
117};
118
119module_pci_driver(hsu_pci_driver);
120
121MODULE_LICENSE("GPL v2");
122MODULE_DESCRIPTION("High Speed UART DMA PCI driver");
123MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h
new file mode 100644
index 000000000000..234393a6997b
--- /dev/null
+++ b/include/linux/dma/hsu.h
@@ -0,0 +1,48 @@
1/*
2 * Driver for the High Speed UART DMA
3 *
4 * Copyright (C) 2015 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef _DMA_HSU_H
12#define _DMA_HSU_H
13
14#include <linux/device.h>
15#include <linux/interrupt.h>
16
17#include <linux/platform_data/dma-hsu.h>
18
struct hsu_dma;

/**
 * struct hsu_dma_chip - representation of HSU DMA hardware
 * @dev: struct device of the DMA controller
 * @irq: irq line
 * @regs: memory mapped I/O space
 * @length: I/O space length
 * @offset: offset of the I/O space where registers are located
 * @hsu: struct hsu_dma that is filled by ->probe()
 * @pdata: platform data for the DMA controller if provided
 */
struct hsu_dma_chip {
	struct device			*dev;
	int				irq;
	void __iomem			*regs;
	unsigned int			length;
	unsigned int			offset;
	struct hsu_dma			*hsu;
	struct hsu_dma_platform_data	*pdata;
};

/* Export to the internal users */
irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr);

/* Export to the platform drivers */
int hsu_dma_probe(struct hsu_dma_chip *chip);
int hsu_dma_remove(struct hsu_dma_chip *chip);
47
48#endif /* _DMA_HSU_H */
diff --git a/include/linux/platform_data/dma-hsu.h b/include/linux/platform_data/dma-hsu.h
new file mode 100644
index 000000000000..8a1f6a4920b2
--- /dev/null
+++ b/include/linux/platform_data/dma-hsu.h
@@ -0,0 +1,25 @@
1/*
2 * Driver for the High Speed UART DMA
3 *
4 * Copyright (C) 2015 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef _PLATFORM_DATA_DMA_HSU_H
12#define _PLATFORM_DATA_DMA_HSU_H
13
14#include <linux/device.h>
15
/*
 * Identifies a channel of a particular HSU DMA controller for slave
 * drivers.  NOTE(review): not referenced by the visible driver code —
 * presumably consumed by channel-request filter callbacks; confirm
 * against the UART-side users.
 */
struct hsu_dma_slave {
	struct device	*dma_dev;
	int		chan_id;
};

struct hsu_dma_platform_data {
	unsigned short	nr_channels;	/* number of DMA channels on the chip */
};
24
25#endif /* _PLATFORM_DATA_DMA_HSU_H */