Diffstat (limited to 'drivers/dma/hsu/hsu.c')
-rw-r--r--  drivers/dma/hsu/hsu.c  495
1 file changed, 495 insertions, 0 deletions
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
new file mode 100644
index 000000000000..9b84def7a353
--- /dev/null
+++ b/drivers/dma/hsu/hsu.c
@@ -0,0 +1,495 @@
/*
 * Core driver for the High Speed UART DMA
 *
 * Copyright (C) 2015 Intel Corporation
 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *
 * Partially based on the bits found in drivers/tty/serial/mfd.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA channel allocation:
 * 1. Even number chans are used for DMA Read (UART TX), odd chans for DMA
 *    Write (UART RX).
 * 2. 0/1 channel are assigned to port 0, 2/3 chan to port 1, 4/5 chan to
 *    port 2, and so on.
 */
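
/*
 * Equivalently: UART port p uses channel 2 * p for DMA Read (TX) and
 * channel 2 * p + 1 for DMA Write (RX).
 */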

#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "hsu.h"

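/* Slave bus widths advertised below via src_addr_widths/dst_addr_widths */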
#define HSU_DMA_BUSWIDTHS				\
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	|	\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_3_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_16_BYTES)

static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc)
{
	hsu_chan_writel(hsuc, HSU_CH_CR, 0);
}

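/*
 * CHA activates the channel; CHD, when set, selects the device-to-memory
 * (UART RX) direction.
 */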
static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc)
{
	u32 cr = HSU_CH_CR_CHA;

	if (hsuc->direction == DMA_MEM_TO_DEV)
		cr &= ~HSU_CH_CR_CHD;
	else if (hsuc->direction == DMA_DEV_TO_MEM)
		cr |= HSU_CH_CR_CHD;

	hsu_chan_writel(hsuc, HSU_CH_CR, cr);
}

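/*
 * Starting a channel: disable it first, program burst size and transfer
 * width, load up to HSU_DMA_CHAN_NR_DESC hardware descriptors, then enable.
 */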
static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
{
	struct dma_slave_config *config = &hsuc->config;
	struct hsu_dma_desc *desc = hsuc->desc;
	u32 bsr = 0, mtsr = 0;	/* to shut the compiler up */
	u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI;
	unsigned int i, count;

	if (hsuc->direction == DMA_MEM_TO_DEV) {
		bsr = config->dst_maxburst;
		mtsr = config->dst_addr_width;
	} else if (hsuc->direction == DMA_DEV_TO_MEM) {
		bsr = config->src_maxburst;
		mtsr = config->src_addr_width;
	}

	hsu_chan_disable(hsuc);

	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
	hsu_chan_writel(hsuc, HSU_CH_BSR, bsr);
	hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);

	/* Set up to HSU_DMA_CHAN_NR_DESC descriptors per round */
	count = desc->nents - desc->active;
	if (count > HSU_DMA_CHAN_NR_DESC)
		count = HSU_DMA_CHAN_NR_DESC;
	for (i = 0; i < count; i++) {
		hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
		hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);

		/* Prepare value for DCR */
		dcr |= HSU_CH_DCR_DESCA(i);
		dcr |= HSU_CH_DCR_CHTOI(i);	/* timeout bit, see HSU Errata 1 */

		desc->active++;
	}
	/* Only for the last descriptor in the chain */
	dcr |= HSU_CH_DCR_CHSOD(count - 1);
	dcr |= HSU_CH_DCR_CHDI(count - 1);

	hsu_chan_writel(hsuc, HSU_CH_DCR, dcr);

	hsu_chan_enable(hsuc);
}

static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
{
	unsigned long flags;

	spin_lock_irqsave(&hsuc->lock, flags);
	hsu_chan_disable(hsuc);
	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
	spin_unlock_irqrestore(&hsuc->lock, flags);
}

static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
{
	unsigned long flags;

	spin_lock_irqsave(&hsuc->lock, flags);
	hsu_dma_chan_start(hsuc);
	spin_unlock_irqrestore(&hsuc->lock, flags);
}

static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
{
	struct virt_dma_desc *vdesc;

	/* Get the next descriptor */
	vdesc = vchan_next_desc(&hsuc->vchan);
	if (!vdesc) {
		hsuc->desc = NULL;
		return;
	}

	list_del(&vdesc->node);
	hsuc->desc = to_hsu_dma_desc(vdesc);

	/* Start the channel with a new descriptor */
	hsu_dma_start_channel(hsuc);
}

static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
{
	unsigned long flags;
	u32 sr;

	spin_lock_irqsave(&hsuc->lock, flags);
	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
	spin_unlock_irqrestore(&hsuc->lock, flags);

	return sr;
}

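/*
 * Meant to be called by the platform glue (e.g. the PCI part of this driver)
 * from its interrupt handler, once per channel number nr it decodes as
 * interrupting.
 */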
irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
{
	struct hsu_dma_chan *hsuc;
	struct hsu_dma_desc *desc;
	unsigned long flags;
	u32 sr;

	/* Sanity check */
	if (nr >= chip->pdata->nr_channels)
		return IRQ_NONE;

	hsuc = &chip->hsu->chan[nr];

	/*
	 * The IRQ status must always be read to clear it, no matter the
	 * situation. There is a hardware bug, see Errata 5, HSD 2900918.
	 */
	sr = hsu_dma_chan_get_sr(hsuc);
	if (!sr)
		return IRQ_NONE;

	/* A timeout IRQ requires a short wait, see Errata 2 */
	if (hsuc->direction == DMA_DEV_TO_MEM && (sr & HSU_CH_SR_DESCTO_ANY))
		udelay(2);

	sr &= ~HSU_CH_SR_DESCTO_ANY;
	if (!sr)
		return IRQ_HANDLED;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	desc = hsuc->desc;
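	/*
	 * On error mark the descriptor failed; if sg entries remain to be
	 * programmed, restart the channel with the next batch; otherwise
	 * complete this descriptor and start the next queued one.
	 */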
	if (desc) {
		if (sr & HSU_CH_SR_CHE) {
			desc->status = DMA_ERROR;
		} else if (desc->active < desc->nents) {
			hsu_dma_start_channel(hsuc);
		} else {
			vchan_cookie_complete(&desc->vdesc);
			desc->status = DMA_COMPLETE;
			hsu_dma_start_transfer(hsuc);
		}
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(hsu_dma_irq);

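/* GFP_NOWAIT: prep calls may come from atomic context, so do not sleep here */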
static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
{
	struct hsu_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT);
	if (!desc->sg) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void hsu_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc);

	kfree(desc->sg);
	kfree(desc);
}

static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	struct hsu_dma_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = hsu_dma_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		desc->sg[i].addr = sg_dma_address(sg);
		desc->sg[i].len = sg_dma_len(sg);
	}

	desc->nents = sg_len;
	desc->direction = direction;
	/* desc->active = 0 by kzalloc */
	desc->status = DMA_IN_PROGRESS;

	return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
}
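
/*
 * A typical dmaengine client drives the functions above roughly as follows
 * (sketch only, not part of this driver):
 *
 *	dmaengine_slave_config(chan, &config);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		cookie = dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */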

static void hsu_dma_issue_pending(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)
		hsu_dma_start_transfer(hsuc);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
}

static size_t hsu_dma_desc_size(struct hsu_dma_desc *desc)
{
	size_t bytes = 0;
	unsigned int i;

	for (i = desc->active; i < desc->nents; i++)
		bytes += desc->sg[i].len;

	return bytes;
}

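/*
 * Residue of the in-flight descriptor: the sg entries not yet programmed
 * plus whatever the hardware still reports for the programmed ones.
 */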
static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
	struct hsu_dma_desc *desc = hsuc->desc;
	size_t bytes = hsu_dma_desc_size(desc);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&hsuc->lock, flags);
	i = desc->active % HSU_DMA_CHAN_NR_DESC;
	do {
		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
	} while (--i >= 0);
	spin_unlock_irqrestore(&hsuc->lock, flags);

	return bytes;
}

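/*
 * Residue reporting: for the descriptor currently on the hardware ask
 * hsu_dma_active_desc_size(); for one still sitting in the queue the
 * residue is simply the sum of its sg lengths.
 */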
static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	size_t bytes;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	vdesc = vchan_find_desc(&hsuc->vchan, cookie);
	if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) {
		bytes = hsu_dma_active_desc_size(hsuc);
		dma_set_residue(state, bytes);
		status = hsuc->desc->status;
	} else if (vdesc) {
		bytes = hsu_dma_desc_size(to_hsu_dma_desc(vdesc));
		dma_set_residue(state, bytes);
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return status;
}

static int hsu_dma_slave_config(struct dma_chan *chan,
		struct dma_slave_config *config)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(config->direction))
		return -EINVAL;

	memcpy(&hsuc->config, config, sizeof(hsuc->config));

	return 0;
}

static void hsu_dma_chan_deactivate(struct hsu_dma_chan *hsuc)
{
	unsigned long flags;

	spin_lock_irqsave(&hsuc->lock, flags);
	hsu_chan_disable(hsuc);
	spin_unlock_irqrestore(&hsuc->lock, flags);
}

static void hsu_dma_chan_activate(struct hsu_dma_chan *hsuc)
{
	unsigned long flags;

	spin_lock_irqsave(&hsuc->lock, flags);
	hsu_chan_enable(hsuc);
	spin_unlock_irqrestore(&hsuc->lock, flags);
}

static int hsu_dma_pause(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
		hsu_dma_chan_deactivate(hsuc);
		hsuc->desc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;
}

static int hsu_dma_resume(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
		hsuc->desc->status = DMA_IN_PROGRESS;
		hsu_dma_chan_activate(hsuc);
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;
}

static int hsu_dma_terminate_all(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&hsuc->vchan.lock, flags);

	hsu_dma_stop_channel(hsuc);
	hsuc->desc = NULL;

	vchan_get_all_descriptors(&hsuc->vchan, &head);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
	vchan_dma_desc_free_list(&hsuc->vchan, &head);

	return 0;
}

static void hsu_dma_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}

int hsu_dma_probe(struct hsu_dma_chip *chip)
{
	struct hsu_dma *hsu;
	struct hsu_dma_platform_data *pdata = chip->pdata;
	void __iomem *addr = chip->regs + chip->offset;
	unsigned short i;
	int ret;

	hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL);
	if (!hsu)
		return -ENOMEM;

	chip->hsu = hsu;

	if (!pdata) {
		pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
		if (!pdata)
			return -ENOMEM;

		chip->pdata = pdata;

		/* Guess nr_channels from the IO space length */
		pdata->nr_channels = (chip->length - chip->offset) /
				     HSU_DMA_CHAN_LENGTH;
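		/*
		 * Each channel occupies HSU_DMA_CHAN_LENGTH bytes of register
		 * space: e.g., assuming HSU_DMA_CHAN_LENGTH is 0x40, a
		 * 256-byte region yields 4 channels.
		 */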
	}

	hsu->chan = devm_kcalloc(chip->dev, pdata->nr_channels,
				 sizeof(*hsu->chan), GFP_KERNEL);
	if (!hsu->chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&hsu->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		hsuc->vchan.desc_free = hsu_dma_desc_free;
		vchan_init(&hsuc->vchan, &hsu->dma);

		hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
		hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;

		spin_lock_init(&hsuc->lock);
	}

	dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask);

	hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources;

	hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg;

	hsu->dma.device_issue_pending = hsu_dma_issue_pending;
	hsu->dma.device_tx_status = hsu_dma_tx_status;

	hsu->dma.device_config = hsu_dma_slave_config;
	hsu->dma.device_pause = hsu_dma_pause;
	hsu->dma.device_resume = hsu_dma_resume;
	hsu->dma.device_terminate_all = hsu_dma_terminate_all;

	hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	hsu->dma.dev = chip->dev;

	ret = dma_async_device_register(&hsu->dma);
	if (ret)
		return ret;

	dev_info(chip->dev, "Found HSU DMA, %d channels\n", pdata->nr_channels);
	return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_probe);

int hsu_dma_remove(struct hsu_dma_chip *chip)
{
	struct hsu_dma *hsu = chip->hsu;
	unsigned short i;

	dma_async_device_unregister(&hsu->dma);

	for (i = 0; i < chip->pdata->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		tasklet_kill(&hsuc->vchan.task);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("High Speed UART DMA core driver");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");