Diffstat (limited to 'drivers/dma/intel_mid_dma.c')
 -rw-r--r--  drivers/dma/intel_mid_dma.c | 476
 1 file changed, 390 insertions(+), 86 deletions(-)
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index c2591e8d9b6e..338bc4eed1f3 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -25,6 +25,7 @@
  */
 #include <linux/pci.h>
 #include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
 #include <linux/intel_mid_dma.h>
 
 #define MAX_CHAN	4 /*max ch across controllers*/
@@ -91,13 +92,13 @@ static int get_block_ts(int len, int tx_width, int block_size)
 	int byte_width = 0, block_ts = 0;
 
 	switch (tx_width) {
-	case LNW_DMA_WIDTH_8BIT:
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
 		byte_width = 1;
 		break;
-	case LNW_DMA_WIDTH_16BIT:
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
 		byte_width = 2;
 		break;
-	case LNW_DMA_WIDTH_32BIT:
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
 	default:
 		byte_width = 4;
 		break;
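
CTL_HI.BLOCK_TS counts bus-width-sized transactions rather than bytes, which is why get_block_ts() first maps the generic dma_slave_buswidth value to byte_width. A hedged sketch of the rest of the calculation (the cap at the controller's block_size is inferred from the parameter list, it is not shown in this hunk):

	/* Sketch: e.g. a 4096-byte buffer at DMA_SLAVE_BUSWIDTH_4_BYTES
	 * gives 4096 / 4 = 1024 transactions programmed into BLOCK_TS,
	 * presumably clamped to the controller's maximum block size. */
	block_ts = len / byte_width;
	if (block_ts > block_size)
		block_ts = block_size;
	return block_ts;
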
@@ -247,16 +248,17 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
 	struct middma_device *mid = to_middma_device(midc->chan.device);
 
 	/* channel is idle */
-	if (midc->in_use && test_ch_en(midc->dma_base, midc->ch_id)) {
+	if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
 		/*error*/
 		pr_err("ERR_MDMA: channel is busy in start\n");
 		/* The tasklet will hopefully advance the queue... */
 		return;
 	}
-
+	midc->busy = true;
 	/*write registers and en*/
 	iowrite32(first->sar, midc->ch_regs + SAR);
 	iowrite32(first->dar, midc->ch_regs + DAR);
+	iowrite32(first->lli_phys, midc->ch_regs + LLP);
 	iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
 	iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
 	iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
@@ -264,9 +266,9 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
 	pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
 		(int)first->sar, (int)first->dar, first->cfg_hi,
 		first->cfg_lo, first->ctl_hi, first->ctl_lo);
+	first->status = DMA_IN_PROGRESS;
 
 	iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
-	first->status = DMA_IN_PROGRESS;
 }
 
 /**
@@ -283,20 +285,36 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
 {
 	struct dma_async_tx_descriptor *txd = &desc->txd;
 	dma_async_tx_callback callback_txd = NULL;
+	struct intel_mid_dma_lli *llitem;
 	void *param_txd = NULL;
 
 	midc->completed = txd->cookie;
 	callback_txd = txd->callback;
 	param_txd = txd->callback_param;
 
-	list_move(&desc->desc_node, &midc->free_list);
-
+	if (desc->lli != NULL) {
+		/*clear the DONE bit of completed LLI in memory*/
+		llitem = desc->lli + desc->current_lli;
+		llitem->ctl_hi &= CLEAR_DONE;
+		if (desc->current_lli < desc->lli_length-1)
+			(desc->current_lli)++;
+		else
+			desc->current_lli = 0;
+	}
 	spin_unlock_bh(&midc->lock);
 	if (callback_txd) {
 		pr_debug("MDMA: TXD callback set ... calling\n");
 		callback_txd(param_txd);
-		spin_lock_bh(&midc->lock);
-		return;
+	}
+	if (midc->raw_tfr) {
+		desc->status = DMA_SUCCESS;
+		if (desc->lli != NULL) {
+			pci_pool_free(desc->lli_pool, desc->lli,
+				desc->lli_phys);
+			pci_pool_destroy(desc->lli_pool);
+		}
+		list_move(&desc->desc_node, &midc->free_list);
+		midc->busy = false;
 	}
 	spin_lock_bh(&midc->lock);
 
@@ -317,14 +335,89 @@ static void midc_scan_descriptors(struct middma_device *mid,
 
 	/*tx is complete*/
 	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
-		if (desc->status == DMA_IN_PROGRESS) {
-			desc->status = DMA_SUCCESS;
+		if (desc->status == DMA_IN_PROGRESS)
 			midc_descriptor_complete(midc, desc);
-		}
 	}
 	return;
-}
+	}
+/**
+ * midc_lli_fill_sg - Helper function to convert
+ *		SG list to Linked List Items.
+ *@midc: Channel
+ *@desc: DMA descriptor
+ *@sglist: Pointer to SG list
+ *@sglen: SG list length
+ *@flags: DMA transaction flags
+ *
+ * Walk through the SG list and convert the SG list into Linked
+ * List Items (LLI).
+ */
+static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
+				struct intel_mid_dma_desc *desc,
+				struct scatterlist *sglist,
+				unsigned int sglen,
+				unsigned int flags)
+{
+	struct intel_mid_dma_slave *mids;
+	struct scatterlist *sg;
+	dma_addr_t lli_next, sg_phy_addr;
+	struct intel_mid_dma_lli *lli_bloc_desc;
+	union intel_mid_dma_ctl_lo ctl_lo;
+	union intel_mid_dma_ctl_hi ctl_hi;
+	int i;
 
+	pr_debug("MDMA: Entered midc_lli_fill_sg\n");
+	mids = midc->mid_slave;
+
+	lli_bloc_desc = desc->lli;
+	lli_next = desc->lli_phys;
+
+	ctl_lo.ctl_lo = desc->ctl_lo;
+	ctl_hi.ctl_hi = desc->ctl_hi;
+	for_each_sg(sglist, sg, sglen, i) {
+		/*Populate CTL_LOW and LLI values*/
+		if (i != sglen - 1) {
+			lli_next = lli_next +
+				sizeof(struct intel_mid_dma_lli);
+		} else {
+			/*Check for circular list, otherwise terminate LLI to ZERO*/
+			if (flags & DMA_PREP_CIRCULAR_LIST) {
+				pr_debug("MDMA: LLI is configured in circular mode\n");
+				lli_next = desc->lli_phys;
+			} else {
+				lli_next = 0;
+				ctl_lo.ctlx.llp_dst_en = 0;
+				ctl_lo.ctlx.llp_src_en = 0;
+			}
+		}
+		/*Populate CTL_HI values*/
+		ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
+						desc->width,
+						midc->dma->block_size);
+		/*Populate SAR and DAR values*/
+		sg_phy_addr = sg_phys(sg);
+		if (desc->dirn == DMA_TO_DEVICE) {
+			lli_bloc_desc->sar = sg_phy_addr;
+			lli_bloc_desc->dar = mids->dma_slave.dst_addr;
+		} else if (desc->dirn == DMA_FROM_DEVICE) {
+			lli_bloc_desc->sar = mids->dma_slave.src_addr;
+			lli_bloc_desc->dar = sg_phy_addr;
+		}
+		/*Copy values into block descriptor in system memory*/
+		lli_bloc_desc->llp = lli_next;
+		lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
+		lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;
+
+		lli_bloc_desc++;
+	}
+	/*Copy very first LLI values to descriptor*/
+	desc->ctl_lo = desc->lli->ctl_lo;
+	desc->ctl_hi = desc->lli->ctl_hi;
+	desc->sar = desc->lli->sar;
+	desc->dar = desc->lli->dar;
+
+	return 0;
+}
 /*****************************************************************************
 DMA engine callback Functions*/
 /**
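
Each entry that midc_lli_fill_sg() writes is a block descriptor which the controller walks on its own, starting from the LLP register programmed in midc_dostart(). The layout below is a hedged sketch inferred from the field accesses above; the authoritative definition lives in the driver's register header, not in this patch:

	/* Sketch of one linked-list item as consumed by the hardware;
	 * llp points at the next item (0 terminates the chain, or it
	 * points back to the head when DMA_PREP_CIRCULAR_LIST is set). */
	struct intel_mid_dma_lli {
		dma_addr_t	sar;	/* block source address */
		dma_addr_t	dar;	/* block destination address */
		dma_addr_t	llp;	/* next LLI in the chain */
		u32		ctl_lo;	/* widths, bursts, llp_*_en */
		u32		ctl_hi;	/* BLOCK_TS plus the DONE bit */
	} __packed;
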
@@ -349,12 +442,12 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	desc->txd.cookie = cookie;
 
 
-	if (list_empty(&midc->active_list)) {
-		midc_dostart(midc, desc);
+	if (list_empty(&midc->active_list))
 		list_add_tail(&desc->desc_node, &midc->active_list);
-	} else {
+	else
 		list_add_tail(&desc->desc_node, &midc->queue);
-	}
+
+	midc_dostart(midc, desc);
 	spin_unlock_bh(&midc->lock);
 
 	return cookie;
@@ -414,6 +507,23 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
 	return ret;
 }
 
+static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
+{
+	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+	struct dma_slave_config *slave = (struct dma_slave_config *)arg;
+	struct intel_mid_dma_slave *mid_slave;
+
+	BUG_ON(!midc);
+	BUG_ON(!slave);
+	pr_debug("MDMA: slave control called\n");
+
+	mid_slave = to_intel_mid_dma_slave(slave);
+
+	BUG_ON(!mid_slave);
+
+	midc->mid_slave = mid_slave;
+	return 0;
+}
 /**
  * intel_mid_dma_device_control - DMA device control
  * @chan: chan for DMA control
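
dma_slave_control() expects the dma_slave_config passed through DMA_SLAVE_CONFIG to be embedded inside a struct intel_mid_dma_slave, which it recovers with to_intel_mid_dma_slave(). A hedged sketch of the calling side in a peripheral driver (the helper name and FIFO address are illustrative; only fields this patch actually reads via mids->dma_slave.* are shown):

	#include <linux/dmaengine.h>
	#include <linux/intel_mid_dma.h>

	/* Illustrative: program 32-bit writes toward a device FIFO.
	 * The wrapper struct must outlive the transfers, since the
	 * channel keeps a pointer to it in midc->mid_slave. */
	static int example_cfg_channel(struct dma_chan *chan,
				struct intel_mid_dma_slave *mid_slave,
				dma_addr_t fifo_addr)
	{
		struct dma_slave_config *cfg = &mid_slave->dma_slave;

		cfg->direction = DMA_TO_DEVICE;
		cfg->dst_addr = fifo_addr;
		cfg->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		cfg->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

		return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
						(unsigned long)cfg);
	}
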
@@ -428,49 +538,41 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
 	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
 	struct middma_device *mid = to_middma_device(chan->device);
 	struct intel_mid_dma_desc *desc, *_desc;
-	LIST_HEAD(list);
+	union intel_mid_dma_cfg_lo cfg_lo;
+
+	if (cmd == DMA_SLAVE_CONFIG)
+		return dma_slave_control(chan, arg);
 
 	if (cmd != DMA_TERMINATE_ALL)
 		return -ENXIO;
 
 	spin_lock_bh(&midc->lock);
-	if (midc->in_use == false) {
+	if (midc->busy == false) {
 		spin_unlock_bh(&midc->lock);
 		return 0;
 	}
-	list_splice_init(&midc->free_list, &list);
-	midc->descs_allocated = 0;
-	midc->slave = NULL;
-
+	/*Suspend and disable the channel*/
+	cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
+	cfg_lo.cfgx.ch_susp = 1;
+	iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
+	iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
+	midc->busy = false;
 	/* Disable interrupts */
 	disable_dma_interrupt(midc);
+	midc->descs_allocated = 0;
 
 	spin_unlock_bh(&midc->lock);
-	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
-		pr_debug("MDMA: freeing descriptor %p\n", desc);
-		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
+		if (desc->lli != NULL) {
+			pci_pool_free(desc->lli_pool, desc->lli,
+				desc->lli_phys);
+			pci_pool_destroy(desc->lli_pool);
+		}
+		list_move(&desc->desc_node, &midc->free_list);
 	}
 	return 0;
 }
 
-/**
- * intel_mid_dma_prep_slave_sg - Prep slave sg txn
- * @chan: chan for DMA transfer
- * @sgl: scatter gather list
- * @sg_len: length of sg txn
- * @direction: DMA transfer dirtn
- * @flags: DMA flags
- *
- * Do DMA sg txn: NOT supported now
- */
-static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
-			struct dma_chan *chan, struct scatterlist *sgl,
-			unsigned int sg_len, enum dma_data_direction direction,
-			unsigned long flags)
-{
-	/*not supported now*/
-	return NULL;
-}
 
 /**
  * intel_mid_dma_prep_memcpy - Prep memcpy txn
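
With this rework, DMA_TERMINATE_ALL actually quiesces the hardware (CFG_LOW.CH_SUSP, then CH_EN clear) instead of merely recycling descriptors. A minimal client-side sketch (hypothetical helper name):

	/* Abort everything queued on the channel; the handler above
	 * also frees any per-descriptor LLI pools still attached. */
	static int example_abort(struct dma_chan *chan)
	{
		return chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
	}
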
@@ -495,23 +597,24 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
 	union intel_mid_dma_ctl_hi ctl_hi;
 	union intel_mid_dma_cfg_lo cfg_lo;
 	union intel_mid_dma_cfg_hi cfg_hi;
-	enum intel_mid_dma_width width = 0;
+	enum dma_slave_buswidth width;
 
 	pr_debug("MDMA: Prep for memcpy\n");
-	WARN_ON(!chan);
+	BUG_ON(!chan);
 	if (!len)
 		return NULL;
 
-	mids = chan->private;
-	WARN_ON(!mids);
-
 	midc = to_intel_mid_dma_chan(chan);
-	WARN_ON(!midc);
+	BUG_ON(!midc);
+
+	mids = midc->mid_slave;
+	BUG_ON(!mids);
 
 	pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
 				midc->dma->pci_id, midc->ch_id, len);
 	pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
-		mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width);
+		mids->cfg_mode, mids->dma_slave.direction,
+		mids->hs_mode, mids->dma_slave.src_addr_width);
 
 	/*calculate CFG_LO*/
 	if (mids->hs_mode == LNW_DMA_SW_HS) {
@@ -530,13 +633,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
 	if (midc->dma->pimr_mask) {
 		cfg_hi.cfgx.protctl = 0x0;	/*default value*/
 		cfg_hi.cfgx.fifo_mode = 1;
-		if (mids->dirn == DMA_TO_DEVICE) {
+		if (mids->dma_slave.direction == DMA_TO_DEVICE) {
 			cfg_hi.cfgx.src_per = 0;
 			if (mids->device_instance == 0)
 				cfg_hi.cfgx.dst_per = 3;
 			if (mids->device_instance == 1)
 				cfg_hi.cfgx.dst_per = 1;
-		} else if (mids->dirn == DMA_FROM_DEVICE) {
+		} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
 			if (mids->device_instance == 0)
 				cfg_hi.cfgx.src_per = 2;
 			if (mids->device_instance == 1)
@@ -552,7 +655,8 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
 
 	/*calculate CTL_HI*/
 	ctl_hi.ctlx.reser = 0;
-	width = mids->src_width;
+	ctl_hi.ctlx.done = 0;
+	width = mids->dma_slave.src_addr_width;
 
 	ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
 	pr_debug("MDMA:calc len %d for block size %d\n",
@@ -560,21 +664,21 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
 	/*calculate CTL_LO*/
 	ctl_lo.ctl_lo = 0;
 	ctl_lo.ctlx.int_en = 1;
-	ctl_lo.ctlx.dst_tr_width = mids->dst_width;
-	ctl_lo.ctlx.src_tr_width = mids->src_width;
-	ctl_lo.ctlx.dst_msize = mids->src_msize;
-	ctl_lo.ctlx.src_msize = mids->dst_msize;
+	ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width;
+	ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width;
+	ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
+	ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
 
 	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
 		ctl_lo.ctlx.tt_fc = 0;
 		ctl_lo.ctlx.sinc = 0;
 		ctl_lo.ctlx.dinc = 0;
 	} else {
-		if (mids->dirn == DMA_TO_DEVICE) {
+		if (mids->dma_slave.direction == DMA_TO_DEVICE) {
 			ctl_lo.ctlx.sinc = 0;
 			ctl_lo.ctlx.dinc = 2;
 			ctl_lo.ctlx.tt_fc = 1;
-		} else if (mids->dirn == DMA_FROM_DEVICE) {
+		} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
 			ctl_lo.ctlx.sinc = 2;
 			ctl_lo.ctlx.dinc = 0;
 			ctl_lo.ctlx.tt_fc = 2;
@@ -597,7 +701,10 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
 	desc->ctl_lo = ctl_lo.ctl_lo;
 	desc->ctl_hi = ctl_hi.ctl_hi;
 	desc->width = width;
-	desc->dirn = mids->dirn;
+	desc->dirn = mids->dma_slave.direction;
+	desc->lli_phys = 0;
+	desc->lli = NULL;
+	desc->lli_pool = NULL;
 	return &desc->txd;
 
 err_desc_get:
@@ -605,6 +712,85 @@ err_desc_get:
 	midc_desc_put(midc, desc);
 	return NULL;
 }
+/**
+ * intel_mid_dma_prep_slave_sg - Prep slave sg txn
+ * @chan: chan for DMA transfer
+ * @sgl: scatter gather list
+ * @sg_len: length of sg txn
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ *
+ * Prepares LLI based peripheral transfer
+ */
+static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
+			struct dma_chan *chan, struct scatterlist *sgl,
+			unsigned int sg_len, enum dma_data_direction direction,
+			unsigned long flags)
+{
+	struct intel_mid_dma_chan *midc = NULL;
+	struct intel_mid_dma_slave *mids = NULL;
+	struct intel_mid_dma_desc *desc = NULL;
+	struct dma_async_tx_descriptor *txd = NULL;
+	union intel_mid_dma_ctl_lo ctl_lo;
+
+	pr_debug("MDMA: Prep for slave SG\n");
+
+	if (!sg_len) {
+		pr_err("MDMA: Invalid SG length\n");
+		return NULL;
+	}
+	midc = to_intel_mid_dma_chan(chan);
+	BUG_ON(!midc);
+
+	mids = midc->mid_slave;
+	BUG_ON(!mids);
+
+	if (!midc->dma->pimr_mask) {
+		pr_debug("MDMA: SG list is not supported by this controller\n");
+		return NULL;
+	}
+
+	pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
+			sg_len, direction, flags);
+
+	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
+	if (NULL == txd) {
+		pr_err("MDMA: Prep memcpy failed\n");
+		return NULL;
+	}
+	desc = to_intel_mid_dma_desc(txd);
+	desc->dirn = direction;
+	ctl_lo.ctl_lo = desc->ctl_lo;
+	ctl_lo.ctlx.llp_dst_en = 1;
+	ctl_lo.ctlx.llp_src_en = 1;
+	desc->ctl_lo = ctl_lo.ctl_lo;
+	desc->lli_length = sg_len;
+	desc->current_lli = 0;
+	/* DMA coherent memory pool for LLI descriptors */
+	desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
+				midc->dma->pdev,
+				(sizeof(struct intel_mid_dma_lli)*sg_len),
+				32, 0);
+	if (NULL == desc->lli_pool) {
+		pr_err("MID_DMA:LLI pool create failed\n");
+		return NULL;
+	}
+
+	desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
+	if (!desc->lli) {
+		pr_err("MID_DMA: LLI alloc failed\n");
+		pci_pool_destroy(desc->lli_pool);
+		return NULL;
+	}
+
+	midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
+	if (flags & DMA_PREP_INTERRUPT) {
+		iowrite32(UNMASK_INTR_REG(midc->ch_id),
+				midc->dma_base + MASK_BLOCK);
+		pr_debug("MDMA:Enabled Block interrupt\n");
+	}
+	return &desc->txd;
+}
 
 /**
  * intel_mid_dma_free_chan_resources - Frees dma resources
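
With the old stub replaced by a working implementation, a peripheral driver can hand a mapped scatterlist straight to the channel. A hedged end-to-end sketch follows (the example_* names are illustrative; the channel is assumed to have been configured through DMA_SLAVE_CONFIG first, since prep_slave_sg reads midc->mid_slave). Note that in this driver tx_submit() itself calls midc_dostart(), so submission starts the hardware:

	#include <linux/dmaengine.h>
	#include <linux/scatterlist.h>

	static void example_done(void *arg)
	{
		pr_debug("MDMA example: transfer complete\n");
	}

	static int example_tx_sg(struct dma_chan *chan, struct device *dev,
				struct scatterlist *sg, int nents)
	{
		struct dma_async_tx_descriptor *txd;
		int mapped;

		mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
		if (!mapped)
			return -ENOMEM;

		/* DMA_PREP_INTERRUPT unmasks the block interrupt above;
		 * DMA_PREP_CIRCULAR_LIST would instead chain the tail
		 * LLI back to the head rather than terminating at 0. */
		txd = chan->device->device_prep_slave_sg(chan, sg, mapped,
						DMA_TO_DEVICE,
						DMA_PREP_INTERRUPT);
		if (!txd)
			return -EBUSY;

		txd->callback = example_done;
		txd->callback_param = NULL;
		txd->tx_submit(txd);	/* starts the channel here */
		return 0;
	}
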
@@ -618,11 +804,11 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
 	struct middma_device *mid = to_middma_device(chan->device);
 	struct intel_mid_dma_desc *desc, *_desc;
 
-	if (true == midc->in_use) {
+	if (true == midc->busy) {
 		/*trying to free ch in use!!!!!*/
 		pr_err("ERR_MDMA: trying to free ch in use\n");
 	}
-
+	pm_runtime_put(&mid->pdev->dev);
 	spin_lock_bh(&midc->lock);
 	midc->descs_allocated = 0;
 	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
@@ -639,6 +825,7 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
 	}
 	spin_unlock_bh(&midc->lock);
 	midc->in_use = false;
+	midc->busy = false;
 	/* Disable CH interrupts */
 	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
 	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
@@ -659,11 +846,20 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
 	dma_addr_t phys;
 	int i = 0;
 
+	pm_runtime_get_sync(&mid->pdev->dev);
+
+	if (mid->state == SUSPENDED) {
+		if (dma_resume(mid->pdev)) {
+			pr_err("ERR_MDMA: resume failed");
+			return -EFAULT;
+		}
+	}
 
 	/* ASSERT: channel is idle */
 	if (test_ch_en(mid->dma_base, midc->ch_id)) {
 		/*ch is not idle*/
 		pr_err("ERR_MDMA: ch not idle\n");
+		pm_runtime_put(&mid->pdev->dev);
 		return -EIO;
 	}
 	midc->completed = chan->cookie = 1;
@@ -674,6 +870,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
 		desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
 		if (!desc) {
 			pr_err("ERR_MDMA: desc failed\n");
+			pm_runtime_put(&mid->pdev->dev);
 			return -ENOMEM;
 			/*check*/
 		}
@@ -686,7 +883,8 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
 		list_add_tail(&desc->desc_node, &midc->free_list);
 	}
 	spin_unlock_bh(&midc->lock);
-	midc->in_use = false;
+	midc->in_use = true;
+	midc->busy = false;
 	pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
 	return i;
 }
@@ -715,7 +913,7 @@ static void dma_tasklet(unsigned long data)
 {
 	struct middma_device *mid = NULL;
 	struct intel_mid_dma_chan *midc = NULL;
-	u32 status;
+	u32 status, raw_tfr, raw_block;
 	int i;
 
 	mid = (struct middma_device *)data;
@@ -724,8 +922,9 @@
 		return;
 	}
 	pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
-	status = ioread32(mid->dma_base + RAW_TFR);
-	pr_debug("MDMA:RAW_TFR %x\n", status);
+	raw_tfr = ioread32(mid->dma_base + RAW_TFR);
+	raw_block = ioread32(mid->dma_base + RAW_BLOCK);
+	status = raw_tfr | raw_block;
 	status &= mid->intr_mask;
 	while (status) {
 		/*txn interrupt*/
@@ -741,15 +940,23 @@
 		}
 		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
 				status, midc->ch_id, i);
+		midc->raw_tfr = raw_tfr;
+		midc->raw_block = raw_block;
+		spin_lock_bh(&midc->lock);
 		/*clearing this interrupts first*/
 		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
-		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK);
-
-		spin_lock_bh(&midc->lock);
+		if (raw_block) {
+			iowrite32((1 << midc->ch_id),
+				mid->dma_base + CLEAR_BLOCK);
+		}
 		midc_scan_descriptors(mid, midc);
 		pr_debug("MDMA:Scan of desc... complete, unmasking\n");
 		iowrite32(UNMASK_INTR_REG(midc->ch_id),
 				mid->dma_base + MASK_TFR);
+		if (raw_block) {
+			iowrite32(UNMASK_INTR_REG(midc->ch_id),
+					mid->dma_base + MASK_BLOCK);
+		}
 		spin_unlock_bh(&midc->lock);
 	}
 
@@ -804,9 +1011,14 @@ static void dma_tasklet2(unsigned long data)
 static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
 {
 	struct middma_device *mid = data;
-	u32 status;
+	u32 tfr_status, err_status;
 	int call_tasklet = 0;
 
+	tfr_status = ioread32(mid->dma_base + RAW_TFR);
+	err_status = ioread32(mid->dma_base + RAW_ERR);
+	if (!tfr_status && !err_status)
+		return IRQ_NONE;
+
 	/*DMA Interrupt*/
 	pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
 	if (!mid) {
@@ -814,19 +1026,18 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
 		return -EINVAL;
 	}
 
-	status = ioread32(mid->dma_base + RAW_TFR);
-	pr_debug("MDMA: Status %x, Mask %x\n", status, mid->intr_mask);
-	status &= mid->intr_mask;
-	if (status) {
+	pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
+	tfr_status &= mid->intr_mask;
+	if (tfr_status) {
 		/*need to disable intr*/
-		iowrite32((status << 8), mid->dma_base + MASK_TFR);
-		pr_debug("MDMA: Calling tasklet %x\n", status);
+		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
+		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
+		pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
 		call_tasklet = 1;
 	}
-	status = ioread32(mid->dma_base + RAW_ERR);
-	status &= mid->intr_mask;
-	if (status) {
-		iowrite32(MASK_INTR_REG(status), mid->dma_base + MASK_ERR);
+	err_status &= mid->intr_mask;
+	if (err_status) {
+		iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR);
 		call_tasklet = 1;
 	}
 	if (call_tasklet)
@@ -856,7 +1067,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
 {
 	struct middma_device *dma = pci_get_drvdata(pdev);
 	int err, i;
-	unsigned int irq_level;
 
 	/* DMA coherent memory pool for DMA descriptor allocations */
 	dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
@@ -884,6 +1094,7 @@ static int mid_setup_dma(struct pci_dev *pdev)
 	pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
 	/*init CH structures*/
 	dma->intr_mask = 0;
+	dma->state = RUNNING;
 	for (i = 0; i < dma->max_chan; i++) {
 		struct intel_mid_dma_chan *midch = &dma->ch[i];
 
@@ -943,7 +1154,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
 
 	/*register irq */
 	if (dma->pimr_mask) {
-		irq_level = IRQF_SHARED;
 		pr_debug("MDMA:Requesting irq shared for DMAC1\n");
 		err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
 			IRQF_SHARED, "INTEL_MID_DMAC1", dma);
@@ -951,10 +1161,9 @@ static int mid_setup_dma(struct pci_dev *pdev)
 			goto err_irq;
 	} else {
 		dma->intr_mask = 0x03;
-		irq_level = 0;
 		pr_debug("MDMA:Requesting irq for DMAC2\n");
 		err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
-			0, "INTEL_MID_DMAC2", dma);
+			IRQF_SHARED, "INTEL_MID_DMAC2", dma);
 		if (0 != err)
 			goto err_irq;
 	}
@@ -1070,6 +1279,9 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_dma;
 
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
 	return 0;
 
 err_dma:
@@ -1104,6 +1316,85 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
 	pci_disable_device(pdev);
 }
 
+/* Power Management */
+/*
+* dma_suspend - PCI suspend function
+*
+* @pci: PCI device structure
+* @state: PM message
+*
+* This function is called by OS when a power event occurs
+*/
+int dma_suspend(struct pci_dev *pci, pm_message_t state)
+{
+	int i;
+	struct middma_device *device = pci_get_drvdata(pci);
+	pr_debug("MDMA: dma_suspend called\n");
+
+	for (i = 0; i < device->max_chan; i++) {
+		if (device->ch[i].in_use)
+			return -EAGAIN;
+	}
+	device->state = SUSPENDED;
+	pci_set_drvdata(pci, device);
+	pci_save_state(pci);
+	pci_disable_device(pci);
+	pci_set_power_state(pci, PCI_D3hot);
+	return 0;
+}
+
+/**
+* dma_resume - PCI resume function
+*
+* @pci: PCI device structure
+*
+* This function is called by OS when a power event occurs
+*/
+int dma_resume(struct pci_dev *pci)
+{
+	int ret;
+	struct middma_device *device = pci_get_drvdata(pci);
+
+	pr_debug("MDMA: dma_resume called\n");
+	pci_set_power_state(pci, PCI_D0);
+	pci_restore_state(pci);
+	ret = pci_enable_device(pci);
+	if (ret) {
+		pr_err("MDMA: device cant be enabled for %x\n", pci->device);
+		return ret;
+	}
+	device->state = RUNNING;
+	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
+	pci_set_drvdata(pci, device);
+	return 0;
+}
+
+static int dma_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	return dma_suspend(pci_dev, PMSG_SUSPEND);
+}
+
+static int dma_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	return dma_resume(pci_dev);
+}
+
+static int dma_runtime_idle(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct middma_device *device = pci_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < device->max_chan; i++) {
+		if (device->ch[i].in_use)
+			return -EAGAIN;
+	}
+
+	return pm_schedule_suspend(dev, 0);
+}
+
 /******************************************************************************
 * PCI stuff
 */
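
The runtime hooks above pair with the pm_runtime_get_sync()/pm_runtime_put() calls added to the channel alloc and free paths. A hedged sketch of the resulting lifetime from a client's point of view (illustrative helper; nothing intel_mid_dma-specific is required):

	static void example_channel_lifetime(void)
	{
		dma_cap_mask_t mask;
		struct dma_chan *chan;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* alloc_chan_resources() takes a runtime-PM reference
		 * and resumes the controller if it was suspended */
		chan = dma_request_channel(mask, NULL, NULL);
		if (!chan)
			return;

		/* ... DMA_SLAVE_CONFIG, prep, submit ... */

		/* free_chan_resources() drops the reference; once no
		 * channel is in_use, dma_runtime_idle() lets the core
		 * schedule a suspend via pm_schedule_suspend(dev, 0) */
		dma_release_channel(chan);
	}
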
@@ -1116,11 +1407,24 @@ static struct pci_device_id intel_mid_dma_ids[] = {
 };
 MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);
 
+static const struct dev_pm_ops intel_mid_dma_pm = {
+	.runtime_suspend = dma_runtime_suspend,
+	.runtime_resume = dma_runtime_resume,
+	.runtime_idle = dma_runtime_idle,
+};
+
 static struct pci_driver intel_mid_dma_pci = {
 	.name		=	"Intel MID DMA",
 	.id_table	=	intel_mid_dma_ids,
 	.probe		=	intel_mid_dma_probe,
 	.remove		=	__devexit_p(intel_mid_dma_remove),
+#ifdef CONFIG_PM
+	.suspend = dma_suspend,
+	.resume = dma_resume,
+	.driver = {
+		.pm = &intel_mid_dma_pm,
+	},
+#endif
 };
 
 static int __init intel_mid_dma_init(void)