path: root/drivers/dma/intel_mid_dma.c
author		Paul Mundt <lethal@linux-sh.org>	2011-01-13 01:06:28 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2011-01-13 01:06:28 -0500
commit		f43dc23d5ea91fca257be02138a255f02d98e806 (patch)
tree		b29722f6e965316e90ac97abf79923ced250dc21 /drivers/dma/intel_mid_dma.c
parent		f8e53553f452dcbf67cb89c8cba63a1cd6eb4cc0 (diff)
parent		4162cf64973df51fc885825bc9ca4d055891c49f (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6 into common/serial-rework
Conflicts:
	arch/sh/kernel/cpu/sh2/setup-sh7619.c
	arch/sh/kernel/cpu/sh2a/setup-mxg.c
	arch/sh/kernel/cpu/sh2a/setup-sh7201.c
	arch/sh/kernel/cpu/sh2a/setup-sh7203.c
	arch/sh/kernel/cpu/sh2a/setup-sh7206.c
	arch/sh/kernel/cpu/sh3/setup-sh7705.c
	arch/sh/kernel/cpu/sh3/setup-sh770x.c
	arch/sh/kernel/cpu/sh3/setup-sh7710.c
	arch/sh/kernel/cpu/sh3/setup-sh7720.c
	arch/sh/kernel/cpu/sh4/setup-sh4-202.c
	arch/sh/kernel/cpu/sh4/setup-sh7750.c
	arch/sh/kernel/cpu/sh4/setup-sh7760.c
	arch/sh/kernel/cpu/sh4a/setup-sh7343.c
	arch/sh/kernel/cpu/sh4a/setup-sh7366.c
	arch/sh/kernel/cpu/sh4a/setup-sh7722.c
	arch/sh/kernel/cpu/sh4a/setup-sh7723.c
	arch/sh/kernel/cpu/sh4a/setup-sh7724.c
	arch/sh/kernel/cpu/sh4a/setup-sh7763.c
	arch/sh/kernel/cpu/sh4a/setup-sh7770.c
	arch/sh/kernel/cpu/sh4a/setup-sh7780.c
	arch/sh/kernel/cpu/sh4a/setup-sh7785.c
	arch/sh/kernel/cpu/sh4a/setup-sh7786.c
	arch/sh/kernel/cpu/sh4a/setup-shx3.c
	arch/sh/kernel/cpu/sh5/setup-sh5.c
	drivers/serial/sh-sci.c
	drivers/serial/sh-sci.h
	include/linux/serial_sci.h
Diffstat (limited to 'drivers/dma/intel_mid_dma.c')
-rw-r--r--	drivers/dma/intel_mid_dma.c	1445
1 file changed, 1445 insertions, 0 deletions
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
new file mode 100644
index 000000000000..3109bd94bc4f
--- /dev/null
+++ b/drivers/dma/intel_mid_dma.c
@@ -0,0 +1,1445 @@
/*
 *  intel_mid_dma.c - Intel Langwell DMA Drivers
 *
 *  Copyright (C) 2008-10 Intel Corp
 *  Author: Vinod Koul <vinod.koul@intel.com>
 *  The driver design is based on dw_dmac driver
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/intel_mid_dma.h>

#define MAX_CHAN	4 /* max ch across controllers */
#include "intel_mid_dma_regs.h"

#define INTEL_MID_DMAC1_ID		0x0814
#define INTEL_MID_DMAC2_ID		0x0813
#define INTEL_MID_GP_DMAC2_ID		0x0827
#define INTEL_MFLD_DMAC1_ID		0x0830
#define LNW_PERIPHRAL_MASK_BASE		0xFFAE8008
#define LNW_PERIPHRAL_MASK_SIZE		0x10
#define LNW_PERIPHRAL_STATUS		0x0
#define LNW_PERIPHRAL_MASK		0x8

struct intel_mid_dma_probe_info {
	u8 max_chan;
	u8 ch_base;
	u16 block_size;
	u32 pimr_mask;
};

#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
	((kernel_ulong_t)&(struct intel_mid_dma_probe_info) {	\
		.max_chan = (_max_chan),			\
		.ch_base = (_ch_base),				\
		.block_size = (_block_size),			\
		.pimr_mask = (_pimr_mask),			\
	})

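/*
 * For illustration: the PCI id table at the bottom of this file packs
 * INFO(2, 6, 4095, 0x200020) into pci_device_id.driver_data for DMAC1,
 * i.e. two channels based at channel 6, a 4095-item max block and PIMR
 * mask 0x200020; the probe routine reads it back via id->driver_data.
 */
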
/*****************************************************************************
Utility Functions*/
/**
 * get_ch_index	-	convert status to channel
 * @status: status mask
 * @base: dma ch base value
 *
 * Modify the status mask and return the channel index needing
 * attention (or -1 if none)
 */
static int get_ch_index(int *status, unsigned int base)
{
	int i;
	for (i = 0; i < MAX_CHAN; i++) {
		if (*status & (1 << (i + base))) {
			*status = *status & ~(1 << (i + base));
			pr_debug("MDMA: index %d New status %x\n", i, *status);
			return i;
		}
	}
	return -1;
}

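/*
 * Worked example: with base = 6 and *status = 0x40 (only channel 6
 * pending), bit 6 is cleared and index 0 is returned; a second call on
 * the now-empty mask returns -1.
 */
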
/**
 * get_block_ts	-	calculates dma transaction length
 * @len: dma transfer length
 * @tx_width: dma transfer src width
 * @block_size: dma controller max block size
 *
 * Based on src width calculate the DMA transaction length in data items;
 * return data items, or 0xFFFF if it exceeds the max length for a block
 */
static int get_block_ts(int len, int tx_width, int block_size)
{
	int byte_width = 0, block_ts = 0;

	switch (tx_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		byte_width = 1;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		byte_width = 2;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	default:
		byte_width = 4;
		break;
	}

	block_ts = len/byte_width;
	if (block_ts > block_size)
		block_ts = 0xFFFF;
	return block_ts;
}

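/*
 * Worked example: a 4096 byte request at DMA_SLAVE_BUSWIDTH_4_BYTES is
 * 4096/4 = 1024 data items, so 1024 is returned provided the
 * controller's block_size is at least 1024; a smaller max block would
 * yield 0xFFFF instead.
 */
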
/*****************************************************************************
DMAC1 interrupt Functions*/

/**
 * dmac1_mask_periphral_intr - mask the peripheral interrupt
 * @midc: dma channel for which masking is required
 *
 * Masks the DMA peripheral interrupt.
 * This is valid for DMAC1 family controllers only.
 * This controller should have peripheral mask registers already mapped.
 */
static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc)
{
	u32 pimr;
	struct middma_device *mid = to_middma_device(midc->chan.device);

	if (mid->pimr_mask) {
		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
		pimr |= mid->pimr_mask;
		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
	}
	return;
}

/**
 * dmac1_unmask_periphral_intr - unmask the peripheral interrupt
 * @midc: dma channel for which masking is required
 *
 * Unmasks the DMA peripheral interrupt.
 * This is valid for DMAC1 family controllers only.
 * This controller should have peripheral mask registers already mapped.
 */
static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
{
	u32 pimr;
	struct middma_device *mid = to_middma_device(midc->chan.device);

	if (mid->pimr_mask) {
		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
		pimr &= ~mid->pimr_mask;
		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
	}
	return;
}

/**
 * enable_dma_interrupt - enable the peripheral interrupt
 * @midc: dma channel for which enable interrupt is required
 *
 * Enables the DMA peripheral interrupt.
 * This is valid for DMAC1 family controllers only.
 * This controller should have peripheral mask registers already mapped.
 */
static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	dmac1_unmask_periphral_intr(midc);

	/* enable channel interrupts */
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
	return;
}

/**
 * disable_dma_interrupt - disable the peripheral interrupt
 * @midc: dma channel for which disable interrupt is required
 *
 * Disables the DMA peripheral interrupt.
 * This is valid for DMAC1 family controllers only.
 * This controller should have peripheral mask registers already mapped.
 */
static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	/* Check LPE PISR, make sure fwd is disabled */
	dmac1_mask_periphral_intr(midc);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
	return;
}

/*****************************************************************************
DMA channel helper Functions*/
/**
 * midc_desc_get - get a descriptor
 * @midc: dma channel for which descriptor is required
 *
 * Obtain a descriptor for the channel. Returns NULL if none are free.
 * Once the descriptor is returned it is private until put on another
 * list or freed
 */
static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc, *_desc;
	struct intel_mid_dma_desc *ret = NULL;

	spin_lock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
	}
	spin_unlock_bh(&midc->lock);
	return ret;
}

/**
 * midc_desc_put - put a descriptor
 * @midc: dma channel for which descriptor is required
 * @desc: descriptor to put
 *
 * Return a descriptor from midc_desc_get back to the free pool
 */
static void midc_desc_put(struct intel_mid_dma_chan *midc,
			struct intel_mid_dma_desc *desc)
{
	if (desc) {
		spin_lock_bh(&midc->lock);
		list_add_tail(&desc->desc_node, &midc->free_list);
		spin_unlock_bh(&midc->lock);
	}
}
/**
 * midc_dostart - begin a DMA transaction
 * @midc: channel for which txn is to be started
 * @first: first descriptor of series
 *
 * Load a transaction into the engine. This must be called with midc->lock
 * held and bh disabled.
 */
static void midc_dostart(struct intel_mid_dma_chan *midc,
			struct intel_mid_dma_desc *first)
{
	struct middma_device *mid = to_middma_device(midc->chan.device);

	/* channel must be idle */
	if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
		/* error */
		pr_err("ERR_MDMA: channel is busy in start\n");
		/* The tasklet will hopefully advance the queue... */
		return;
	}
	midc->busy = true;
	/* write registers and enable */
	iowrite32(first->sar, midc->ch_regs + SAR);
	iowrite32(first->dar, midc->ch_regs + DAR);
	iowrite32(first->lli_phys, midc->ch_regs + LLP);
	iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
	iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
	iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
	pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
		(int)first->sar, (int)first->dar, first->cfg_hi,
		first->cfg_lo, first->ctl_hi, first->ctl_lo);
	first->status = DMA_IN_PROGRESS;

	iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
}

/**
 * midc_descriptor_complete - process completed descriptor
 * @midc: channel owning the descriptor
 * @desc: the descriptor itself
 *
 * Process a completed descriptor and perform any callbacks upon
 * the completion. The completion handling drops the lock during the
 * callbacks but must be called with the lock held.
 */
static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
		struct intel_mid_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback_txd = NULL;
	struct intel_mid_dma_lli *llitem;
	void *param_txd = NULL;

	midc->completed = txd->cookie;
	callback_txd = txd->callback;
	param_txd = txd->callback_param;

	if (desc->lli != NULL) {
		/* clear the DONE bit of completed LLI in memory */
		llitem = desc->lli + desc->current_lli;
		llitem->ctl_hi &= CLEAR_DONE;
		if (desc->current_lli < desc->lli_length-1)
			(desc->current_lli)++;
		else
			desc->current_lli = 0;
	}
	spin_unlock_bh(&midc->lock);
	if (callback_txd) {
		pr_debug("MDMA: TXD callback set ... calling\n");
		callback_txd(param_txd);
	}
	if (midc->raw_tfr) {
		desc->status = DMA_SUCCESS;
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
						desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
		}
		list_move(&desc->desc_node, &midc->free_list);
		midc->busy = false;
	}
	spin_lock_bh(&midc->lock);

}
/**
 * midc_scan_descriptors - check the descriptors in channel,
 *			mark completed when tx is complete
 * @mid: device
 * @midc: channel to scan
 *
 * Walk the descriptor chain for the device and process any entries
 * that are complete.
 */
static void midc_scan_descriptors(struct middma_device *mid,
				struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;

	/* tx is complete */
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->status == DMA_IN_PROGRESS)
			midc_descriptor_complete(midc, desc);
	}
	return;
}
/**
 * midc_lli_fill_sg - Helper function to convert
 *			SG list to Linked List Items.
 * @midc: Channel
 * @desc: DMA descriptor
 * @sglist: Pointer to SG list
 * @sglen: SG list length
 * @flags: DMA transaction flags
 *
 * Walk through the SG list and convert the SG list into Linked
 * List Items (LLI).
 */
static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
				struct intel_mid_dma_desc *desc,
				struct scatterlist *sglist,
				unsigned int sglen,
				unsigned int flags)
{
	struct intel_mid_dma_slave *mids;
	struct scatterlist *sg;
	dma_addr_t lli_next, sg_phy_addr;
	struct intel_mid_dma_lli *lli_bloc_desc;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	int i;

	pr_debug("MDMA: Entered midc_lli_fill_sg\n");
	mids = midc->mid_slave;

	lli_bloc_desc = desc->lli;
	lli_next = desc->lli_phys;

	ctl_lo.ctl_lo = desc->ctl_lo;
	ctl_hi.ctl_hi = desc->ctl_hi;
	for_each_sg(sglist, sg, sglen, i) {
		/* Populate CTL_LOW and LLI values */
		if (i != sglen - 1) {
			lli_next = lli_next +
				sizeof(struct intel_mid_dma_lli);
		} else {
			/* Check for circular list, otherwise terminate LLI to ZERO */
			if (flags & DMA_PREP_CIRCULAR_LIST) {
				pr_debug("MDMA: LLI is configured in circular mode\n");
				lli_next = desc->lli_phys;
			} else {
				lli_next = 0;
				ctl_lo.ctlx.llp_dst_en = 0;
				ctl_lo.ctlx.llp_src_en = 0;
			}
		}
		/* Populate CTL_HI values */
		ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
							desc->width,
							midc->dma->block_size);
		/* Populate SAR and DAR values */
		sg_phy_addr = sg_phys(sg);
		if (desc->dirn == DMA_TO_DEVICE) {
			lli_bloc_desc->sar = sg_phy_addr;
			lli_bloc_desc->dar = mids->dma_slave.dst_addr;
		} else if (desc->dirn == DMA_FROM_DEVICE) {
			lli_bloc_desc->sar = mids->dma_slave.src_addr;
			lli_bloc_desc->dar = sg_phy_addr;
		}
		/* Copy values into block descriptor in system memory */
		lli_bloc_desc->llp = lli_next;
		lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
		lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;

		lli_bloc_desc++;
	}
	/* Copy very first LLI values to descriptor */
	desc->ctl_lo = desc->lli->ctl_lo;
	desc->ctl_hi = desc->lli->ctl_hi;
	desc->sar = desc->lli->sar;
	desc->dar = desc->lli->dar;

	return 0;
}
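
/*
 * Shape of the resulting chain, for illustration: with a two-entry
 * scatterlist the pool holds two struct intel_mid_dma_lli blocks; the
 * first block's LLP points at the second, and the second's LLP either
 * points back at desc->lli_phys (DMA_PREP_CIRCULAR_LIST) or is zero
 * with llp_src_en/llp_dst_en cleared to terminate the list.
 */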
/*****************************************************************************
DMA engine callback Functions*/
/**
 * intel_mid_dma_tx_submit - callback to submit DMA transaction
 * @tx: dma engine descriptor
 *
 * Submit the DMA transaction for this descriptor, start if ch idle
 */
static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx);
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&midc->lock);
	cookie = midc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	midc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	if (list_empty(&midc->active_list))
		list_add_tail(&desc->desc_node, &midc->active_list);
	else
		list_add_tail(&desc->desc_node, &midc->queue);

	midc_dostart(midc, desc);
	spin_unlock_bh(&midc->lock);

	return cookie;
}

/**
 * intel_mid_dma_issue_pending - callback to issue pending txn
 * @chan: chan where pending transaction needs to be checked and submitted
 *
 * Call for scan to issue pending descriptors
 */
static void intel_mid_dma_issue_pending(struct dma_chan *chan)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);

	spin_lock_bh(&midc->lock);
	if (!list_empty(&midc->queue))
		midc_scan_descriptors(to_middma_device(chan->device), midc);
	spin_unlock_bh(&midc->lock);
}

/**
 * intel_mid_dma_tx_status - Return status of txn
 * @chan: chan where status needs to be checked
 * @cookie: cookie for txn
 * @txstate: DMA txn state
 *
 * Return status of DMA txn
 */
static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	last_complete = midc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		midc_scan_descriptors(to_middma_device(chan->device), midc);

		last_complete = midc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (txstate) {
		txstate->last = last_complete;
		txstate->used = last_used;
		txstate->residue = 0;
	}
	return ret;
}

static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct dma_slave_config *slave = (struct dma_slave_config *)arg;
	struct intel_mid_dma_slave *mid_slave;

	BUG_ON(!midc);
	BUG_ON(!slave);
	pr_debug("MDMA: slave control called\n");

	mid_slave = to_intel_mid_dma_slave(slave);

	BUG_ON(!mid_slave);

	midc->mid_slave = mid_slave;
	return 0;
}
/**
 * intel_mid_dma_device_control - DMA device control
 * @chan: chan for DMA control
 * @cmd: control cmd
 * @arg: cmd arg value
 *
 * Perform DMA control command
 */
static int intel_mid_dma_device_control(struct dma_chan *chan,
			enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct middma_device *mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc *desc, *_desc;
	union intel_mid_dma_cfg_lo cfg_lo;

	if (cmd == DMA_SLAVE_CONFIG)
		return dma_slave_control(chan, arg);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&midc->lock);
	if (midc->busy == false) {
		spin_unlock_bh(&midc->lock);
		return 0;
	}
	/* Suspend and disable the channel */
	cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
	cfg_lo.cfgx.ch_susp = 1;
	iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
	midc->busy = false;
	/* Disable interrupts */
	disable_dma_interrupt(midc);
	midc->descs_allocated = 0;

	spin_unlock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
							desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
		}
		list_move(&desc->desc_node, &midc->free_list);
	}
	return 0;
}

/**
 * intel_mid_dma_prep_memcpy - Prep memcpy txn
 * @chan: chan for DMA transfer
 * @dest: destination address
 * @src: src address
 * @len: DMA transfer len
 * @flags: DMA flags
 *
 * Perform a DMA memcpy. Note we support slave peripheral DMA transfers only;
 * the peripheral txn details should be filled in the slave structure properly.
 * Returns the descriptor for this txn
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
			struct dma_chan *chan, dma_addr_t dest,
			dma_addr_t src, size_t len, unsigned long flags)
{
	struct intel_mid_dma_chan *midc;
	struct intel_mid_dma_desc *desc = NULL;
	struct intel_mid_dma_slave *mids;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	union intel_mid_dma_cfg_lo cfg_lo;
	union intel_mid_dma_cfg_hi cfg_hi;
	enum dma_slave_buswidth width;

	pr_debug("MDMA: Prep for memcpy\n");
	BUG_ON(!chan);
	if (!len)
		return NULL;

	midc = to_intel_mid_dma_chan(chan);
	BUG_ON(!midc);

	mids = midc->mid_slave;
	BUG_ON(!mids);

	pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
				midc->dma->pci_id, midc->ch_id, len);
	pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
			mids->cfg_mode, mids->dma_slave.direction,
			mids->hs_mode, mids->dma_slave.src_addr_width);

	/* calculate CFG_LO */
	if (mids->hs_mode == LNW_DMA_SW_HS) {
		cfg_lo.cfg_lo = 0;
		cfg_lo.cfgx.hs_sel_dst = 1;
		cfg_lo.cfgx.hs_sel_src = 1;
	} else if (mids->hs_mode == LNW_DMA_HW_HS)
		cfg_lo.cfg_lo = 0x00000;

	/* calculate CFG_HI */
	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		/* SW HS only */
		cfg_hi.cfg_hi = 0;
	} else {
		cfg_hi.cfg_hi = 0;
		if (midc->dma->pimr_mask) {
			cfg_hi.cfgx.protctl = 0x0; /* default value */
			cfg_hi.cfgx.fifo_mode = 1;
			if (mids->dma_slave.direction == DMA_TO_DEVICE) {
				cfg_hi.cfgx.src_per = 0;
				if (mids->device_instance == 0)
					cfg_hi.cfgx.dst_per = 3;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.dst_per = 1;
			} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
				if (mids->device_instance == 0)
					cfg_hi.cfgx.src_per = 2;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.src_per = 0;
				cfg_hi.cfgx.dst_per = 0;
			}
		} else {
			cfg_hi.cfgx.protctl = 0x1; /* default value */
			cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
					midc->ch_id - midc->dma->chan_base;
		}
	}

	/* calculate CTL_HI */
	ctl_hi.ctlx.reser = 0;
	ctl_hi.ctlx.done = 0;
	width = mids->dma_slave.src_addr_width;

	ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
	pr_debug("MDMA:calc len %d for block size %d\n",
				ctl_hi.ctlx.block_ts, midc->dma->block_size);
	/* calculate CTL_LO */
	ctl_lo.ctl_lo = 0;
	ctl_lo.ctlx.int_en = 1;
	ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width;
	ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width;
	ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
	ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;

	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		ctl_lo.ctlx.tt_fc = 0;
		ctl_lo.ctlx.sinc = 0;
		ctl_lo.ctlx.dinc = 0;
	} else {
		if (mids->dma_slave.direction == DMA_TO_DEVICE) {
			ctl_lo.ctlx.sinc = 0;
			ctl_lo.ctlx.dinc = 2;
			ctl_lo.ctlx.tt_fc = 1;
		} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
			ctl_lo.ctlx.sinc = 2;
			ctl_lo.ctlx.dinc = 0;
			ctl_lo.ctlx.tt_fc = 2;
		}
	}

	pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
		ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);

	enable_dma_interrupt(midc);

	desc = midc_desc_get(midc);
	if (desc == NULL)
		goto err_desc_get;
	desc->sar = src;
	desc->dar = dest;
	desc->len = len;
	desc->cfg_hi = cfg_hi.cfg_hi;
	desc->cfg_lo = cfg_lo.cfg_lo;
	desc->ctl_lo = ctl_lo.ctl_lo;
	desc->ctl_hi = ctl_hi.ctl_hi;
	desc->width = width;
	desc->dirn = mids->dma_slave.direction;
	desc->lli_phys = 0;
	desc->lli = NULL;
	desc->lli_pool = NULL;
	return &desc->txd;

err_desc_get:
	pr_err("ERR_MDMA: Failed to get desc\n");
	midc_desc_put(midc, desc);
	return NULL;
}
/**
 * intel_mid_dma_prep_slave_sg - Prep slave sg txn
 * @chan: chan for DMA transfer
 * @sgl: scatter gather list
 * @sg_len: length of sg txn
 * @direction: DMA transfer direction
 * @flags: DMA flags
 *
 * Prepares an LLI-based peripheral transfer
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
			struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_data_direction direction,
			unsigned long flags)
{
	struct intel_mid_dma_chan *midc = NULL;
	struct intel_mid_dma_slave *mids = NULL;
	struct intel_mid_dma_desc *desc = NULL;
	struct dma_async_tx_descriptor *txd = NULL;
	union intel_mid_dma_ctl_lo ctl_lo;

	pr_debug("MDMA: Prep for slave SG\n");

	if (!sg_len) {
		pr_err("MDMA: Invalid SG length\n");
		return NULL;
	}
	midc = to_intel_mid_dma_chan(chan);
	BUG_ON(!midc);

	mids = midc->mid_slave;
	BUG_ON(!mids);

	if (!midc->dma->pimr_mask) {
		pr_debug("MDMA: SG list is not supported by this controller\n");
		return NULL;
	}

	pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
			sg_len, direction, flags);

	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
	if (NULL == txd) {
		pr_err("MDMA: Prep memcpy failed\n");
		return NULL;
	}
	desc = to_intel_mid_dma_desc(txd);
	desc->dirn = direction;
	ctl_lo.ctl_lo = desc->ctl_lo;
	ctl_lo.ctlx.llp_dst_en = 1;
	ctl_lo.ctlx.llp_src_en = 1;
	desc->ctl_lo = ctl_lo.ctl_lo;
	desc->lli_length = sg_len;
	desc->current_lli = 0;
	/* DMA coherent memory pool for LLI descriptors */
	desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
					midc->dma->pdev,
					(sizeof(struct intel_mid_dma_lli)*sg_len),
					32, 0);
	if (NULL == desc->lli_pool) {
		pr_err("MID_DMA:LLI pool create failed\n");
		return NULL;
	}

	desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
	if (!desc->lli) {
		pr_err("MID_DMA: LLI alloc failed\n");
		pci_pool_destroy(desc->lli_pool);
		return NULL;
	}

	midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
	if (flags & DMA_PREP_INTERRUPT) {
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				midc->dma_base + MASK_BLOCK);
		pr_debug("MDMA:Enabled Block interrupt\n");
	}
	return &desc->txd;
}

/**
 * intel_mid_dma_free_chan_resources - Frees dma resources
 * @chan: chan requiring attention
 *
 * Frees the allocated resources on this DMA chan
 */
static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct middma_device *mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc *desc, *_desc;

	if (true == midc->busy) {
		/* trying to free a channel that is still in use! */
		pr_err("ERR_MDMA: trying to free ch in use\n");
	}
	pm_runtime_put(&mid->pdev->dev);
	spin_lock_bh(&midc->lock);
	midc->descs_allocated = 0;
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = false;
	midc->busy = false;
	/* Disable CH interrupts */
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
}

/**
 * intel_mid_dma_alloc_chan_resources - Allocate dma resources
 * @chan: chan requiring attention
 *
 * Allocates DMA resources on this chan.
 * Returns the number of descriptors allocated.
 */
static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	struct middma_device *mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc *desc;
	dma_addr_t phys;
	int i = 0;

	pm_runtime_get_sync(&mid->pdev->dev);

	if (mid->state == SUSPENDED) {
		if (dma_resume(mid->pdev)) {
			pr_err("ERR_MDMA: resume failed");
			return -EFAULT;
		}
	}

	/* ASSERT: channel is idle */
	if (test_ch_en(mid->dma_base, midc->ch_id)) {
		/* ch is not idle */
		pr_err("ERR_MDMA: ch not idle\n");
		pm_runtime_put(&mid->pdev->dev);
		return -EIO;
	}
	midc->completed = chan->cookie = 1;

	spin_lock_bh(&midc->lock);
	while (midc->descs_allocated < DESCS_PER_CHANNEL) {
		spin_unlock_bh(&midc->lock);
		desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
		if (!desc) {
			pr_err("ERR_MDMA: desc failed\n");
			pm_runtime_put(&mid->pdev->dev);
			return -ENOMEM;
			/*check*/
		}
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = intel_mid_dma_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = phys;
		spin_lock_bh(&midc->lock);
		i = ++midc->descs_allocated;
		list_add_tail(&desc->desc_node, &midc->free_list);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = true;
	midc->busy = false;
	pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
	return i;
}

/**
 * midc_handle_error - Handle DMA txn error
 * @mid: controller where error occurred
 * @midc: chan where error occurred
 *
 * Scan the descriptor for error
 */
static void midc_handle_error(struct middma_device *mid,
		struct intel_mid_dma_chan *midc)
{
	midc_scan_descriptors(mid, midc);
}

/**
 * dma_tasklet - DMA interrupt tasklet
 * @data: tasklet arg (the controller structure)
 *
 * Scan the controller for interrupts for completion/error
 * Clear the interrupt and call for handling completion/error
 */
static void dma_tasklet(unsigned long data)
{
	struct middma_device *mid = NULL;
	struct intel_mid_dma_chan *midc = NULL;
	u32 status, raw_tfr, raw_block;
	int i;

	mid = (struct middma_device *)data;
	if (mid == NULL) {
		pr_err("ERR_MDMA: tasklet Null param\n");
		return;
	}
	pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
	raw_tfr = ioread32(mid->dma_base + RAW_TFR);
	raw_block = ioread32(mid->dma_base + RAW_BLOCK);
	status = raw_tfr | raw_block;
	status &= mid->intr_mask;
	while (status) {
		/* txn interrupt */
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);
		midc->raw_tfr = raw_tfr;
		midc->raw_block = raw_block;
		spin_lock_bh(&midc->lock);
		/* clear these interrupts first */
		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
		if (raw_block) {
			iowrite32((1 << midc->ch_id),
				mid->dma_base + CLEAR_BLOCK);
		}
		midc_scan_descriptors(mid, midc);
		pr_debug("MDMA:Scan of desc... complete, unmasking\n");
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_TFR);
		if (raw_block) {
			iowrite32(UNMASK_INTR_REG(midc->ch_id),
					mid->dma_base + MASK_BLOCK);
		}
		spin_unlock_bh(&midc->lock);
	}

	status = ioread32(mid->dma_base + RAW_ERR);
	status &= mid->intr_mask;
	while (status) {
		/* err interrupt */
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx error interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);

		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
		spin_lock_bh(&midc->lock);
		midc_handle_error(mid, midc);
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_ERR);
		spin_unlock_bh(&midc->lock);
	}
	pr_debug("MDMA:Exiting tasklet...\n");
	return;
}

static void dma_tasklet1(unsigned long data)
{
	pr_debug("MDMA:in tasklet1...\n");
	return dma_tasklet(data);
}

static void dma_tasklet2(unsigned long data)
{
	pr_debug("MDMA:in tasklet2...\n");
	return dma_tasklet(data);
}

/**
 * intel_mid_dma_interrupt - DMA ISR
 * @irq: IRQ where interrupt occurred
 * @data: ISR callback data (the controller structure)
 *
 * See if this is our interrupt; if so then schedule the tasklet,
 * otherwise ignore
 */
static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
{
	struct middma_device *mid = data;
	u32 tfr_status, err_status;
	int call_tasklet = 0;

	/* check the callback data before touching the registers */
	if (!mid) {
		pr_err("ERR_MDMA:null pointer mid\n");
		return IRQ_NONE;
	}

	tfr_status = ioread32(mid->dma_base + RAW_TFR);
	err_status = ioread32(mid->dma_base + RAW_ERR);
	if (!tfr_status && !err_status)
		return IRQ_NONE;

	/* DMA Interrupt */
	pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
	pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
	tfr_status &= mid->intr_mask;
	if (tfr_status) {
		/* need to disable intr */
		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
		pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
		call_tasklet = 1;
	}
	err_status &= mid->intr_mask;
	if (err_status) {
		iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR);
		call_tasklet = 1;
	}
	if (call_tasklet)
		tasklet_schedule(&mid->tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
{
	return intel_mid_dma_interrupt(irq, data);
}

static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
{
	return intel_mid_dma_interrupt(irq, data);
}

/**
 * mid_setup_dma -	Setup the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Initialize the DMA controller, channels, registers with DMA engine,
 * ISR. Initialize DMA controller channels.
 */
static int mid_setup_dma(struct pci_dev *pdev)
{
	struct middma_device *dma = pci_get_drvdata(pdev);
	int err, i;

	/* DMA coherent memory pool for DMA descriptor allocations */
	dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
					sizeof(struct intel_mid_dma_desc),
					32, 0);
	if (NULL == dma->dma_pool) {
		pr_err("ERR_MDMA:pci_pool_create failed\n");
		err = -ENOMEM;
		goto err_dma_pool;
	}

	INIT_LIST_HEAD(&dma->common.channels);
	dma->pci_id = pdev->device;
	if (dma->pimr_mask) {
		dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
					LNW_PERIPHRAL_MASK_SIZE);
		if (dma->mask_reg == NULL) {
			pr_err("ERR_MDMA:Can't map peripheral intr space !!\n");
			err = -ENOMEM;
			goto err_irq;	/* also frees the descriptor pool */
		}
	} else
		dma->mask_reg = NULL;

	pr_debug("MDMA:Adding %d channels for this controller\n", dma->max_chan);
	/* init CH structures */
	dma->intr_mask = 0;
	dma->state = RUNNING;
	for (i = 0; i < dma->max_chan; i++) {
		struct intel_mid_dma_chan *midch = &dma->ch[i];

		midch->chan.device = &dma->common;
		midch->chan.cookie = 1;
		midch->chan.chan_id = i;
		midch->ch_id = dma->chan_base + i;
		pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);

		midch->dma_base = dma->dma_base;
		midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id;
		midch->dma = dma;
		dma->intr_mask |= 1 << (dma->chan_base + i);
		spin_lock_init(&midch->lock);

		INIT_LIST_HEAD(&midch->active_list);
		INIT_LIST_HEAD(&midch->queue);
		INIT_LIST_HEAD(&midch->free_list);
		/* mask interrupts */
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_BLOCK);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_SRC_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_DST_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_ERR);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_TFR);

		disable_dma_interrupt(midch);
		list_add_tail(&midch->chan.device_node, &dma->common.channels);
	}
	pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask);

	/* init dma structure */
	dma_cap_zero(dma->common.cap_mask);
	dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
	dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
	dma->common.dev = &pdev->dev;
	dma->common.chancnt = dma->max_chan;

	dma->common.device_alloc_chan_resources =
					intel_mid_dma_alloc_chan_resources;
	dma->common.device_free_chan_resources =
					intel_mid_dma_free_chan_resources;

	dma->common.device_tx_status = intel_mid_dma_tx_status;
	dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
	dma->common.device_issue_pending = intel_mid_dma_issue_pending;
	dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
	dma->common.device_control = intel_mid_dma_device_control;

	/* enable dma cntrl */
	iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);

	/* register irq */
	if (dma->pimr_mask) {
		pr_debug("MDMA:Requesting irq shared for DMAC1\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
			IRQF_SHARED, "INTEL_MID_DMAC1", dma);
		if (0 != err)
			goto err_irq;
	} else {
		dma->intr_mask = 0x03;
		pr_debug("MDMA:Requesting irq for DMAC2\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
			IRQF_SHARED, "INTEL_MID_DMAC2", dma);
		if (0 != err)
			goto err_irq;
	}
	/* register device w/ engine */
	err = dma_async_device_register(&dma->common);
	if (0 != err) {
		pr_err("ERR_MDMA:device_register failed: %d\n", err);
		goto err_engine;
	}
	if (dma->pimr_mask) {
		pr_debug("setting up tasklet1 for DMAC1\n");
		tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
	} else {
		pr_debug("setting up tasklet2 for DMAC2\n");
		tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
	}
	return 0;

err_engine:
	free_irq(pdev->irq, dma);
err_irq:
	pci_pool_destroy(dma->dma_pool);
err_dma_pool:
	pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
	return err;

}

/**
 * middma_shutdown -	Shutdown the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Called by remove.
 * Unregister DMA controller, clear all structures and free interrupt
 */
static void middma_shutdown(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);

	dma_async_device_unregister(&device->common);
	pci_pool_destroy(device->dma_pool);
	if (device->mask_reg)
		iounmap(device->mask_reg);
	if (device->dma_base)
		iounmap(device->dma_base);
	free_irq(pdev->irq, device);
	return;
}

/**
 * intel_mid_dma_probe -	PCI Probe
 * @pdev: Controller PCI device structure
 * @id: pci device id structure
 *
 * Initialize the PCI device, map BARs, query driver data.
 * Call mid_setup_dma to complete controller and channel initialization.
 */
static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
					const struct pci_device_id *id)
{
	struct middma_device *device;
	u32 base_addr, bar_size;
	struct intel_mid_dma_probe_info *info;
	int err;

	pr_debug("MDMA: probe for %x\n", pdev->device);
	info = (void *)id->driver_data;
	pr_debug("MDMA: CH %d, base %d, block len %d, Peripheral mask %x\n",
				info->max_chan, info->ch_base,
				info->block_size, info->pimr_mask);

	err = pci_enable_device(pdev);
	if (err)
		goto err_enable_device;

	err = pci_request_regions(pdev, "intel_mid_dmac");
	if (err)
		goto err_request_regions;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		pr_err("ERR_MDMA:kzalloc failed probe\n");
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pci_dev_get(pdev);

	base_addr = pci_resource_start(pdev, 0);
	bar_size = pci_resource_len(pdev, 0);
	device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
	if (!device->dma_base) {
		pr_err("ERR_MDMA:ioremap failed\n");
		err = -ENOMEM;
		goto err_ioremap;
	}
	pci_set_drvdata(pdev, device);
	pci_set_master(pdev);
	device->max_chan = info->max_chan;
	device->chan_base = info->ch_base;
	device->block_size = info->block_size;
	device->pimr_mask = info->pimr_mask;

	err = mid_setup_dma(pdev);
	if (err)
		goto err_dma;

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_allow(&pdev->dev);
	return 0;

err_dma:
	iounmap(device->dma_base);
err_ioremap:
	pci_dev_put(pdev);
	kfree(device);
err_kzalloc:
err_set_dma_mask:
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_request_regions:
err_enable_device:
	pr_err("ERR_MDMA:Probe failed %d\n", err);
	return err;
}

/**
 * intel_mid_dma_remove -	PCI remove
 * @pdev: Controller PCI device structure
 *
 * Free up all resources and data.
 * Call middma_shutdown to complete controller and channel cleanup.
 */
static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);
	middma_shutdown(pdev);
	pci_dev_put(pdev);
	kfree(device);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

/* Power Management */
/*
 * dma_suspend - PCI suspend function
 *
 * @pci: PCI device structure
 * @state: PM message
 *
 * This function is called by OS when a power event occurs
 */
int dma_suspend(struct pci_dev *pci, pm_message_t state)
{
	int i;
	struct middma_device *device = pci_get_drvdata(pci);
	pr_debug("MDMA: dma_suspend called\n");

	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
			return -EAGAIN;
	}
	device->state = SUSPENDED;
	pci_set_drvdata(pci, device);
	pci_save_state(pci);
	pci_disable_device(pci);
	pci_set_power_state(pci, PCI_D3hot);
	return 0;
}

/**
 * dma_resume - PCI resume function
 *
 * @pci: PCI device structure
 *
 * This function is called by OS when a power event occurs
 */
int dma_resume(struct pci_dev *pci)
{
	int ret;
	struct middma_device *device = pci_get_drvdata(pci);

	pr_debug("MDMA: dma_resume called\n");
	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);
	ret = pci_enable_device(pci);
	if (ret) {
		pr_err("MDMA: device can't be enabled for %x\n", pci->device);
		return ret;
	}
	device->state = RUNNING;
	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
	pci_set_drvdata(pci, device);
	return 0;
}

static int dma_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	return dma_suspend(pci_dev, PMSG_SUSPEND);
}

static int dma_runtime_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	return dma_resume(pci_dev);
}

static int dma_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
			return -EAGAIN;
	}

	return pm_schedule_suspend(dev, 0);
}

/******************************************************************************
* PCI stuff
*/
static struct pci_device_id intel_mid_dma_ids[] = {
	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID),	INFO(2, 6, 4095, 0x200020)},
	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID),	INFO(2, 0, 2047, 0)},
	{ PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID),	INFO(2, 0, 2047, 0)},
	{ PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID),	INFO(4, 0, 4095, 0x400040)},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);

static const struct dev_pm_ops intel_mid_dma_pm = {
	.runtime_suspend = dma_runtime_suspend,
	.runtime_resume = dma_runtime_resume,
	.runtime_idle = dma_runtime_idle,
};

static struct pci_driver intel_mid_dma_pci_driver = {
	.name		= "Intel MID DMA",
	.id_table	= intel_mid_dma_ids,
	.probe		= intel_mid_dma_probe,
	.remove		= __devexit_p(intel_mid_dma_remove),
#ifdef CONFIG_PM
	.suspend	= dma_suspend,
	.resume		= dma_resume,
	.driver	= {
		.pm	= &intel_mid_dma_pm,
	},
#endif
};

static int __init intel_mid_dma_init(void)
{
	pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
			INTEL_MID_DMA_DRIVER_VERSION);
	return pci_register_driver(&intel_mid_dma_pci_driver);
}
fs_initcall(intel_mid_dma_init);

static void __exit intel_mid_dma_exit(void)
{
	pci_unregister_driver(&intel_mid_dma_pci_driver);
}
module_exit(intel_mid_dma_exit);

MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);