author		Linus Torvalds <torvalds@linux-foundation.org>	2011-11-04 21:02:25 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-11-04 21:02:25 -0400
commit		fba9569924e06da076cb2ad12474bbd82d69f54d (patch)
tree		f0b7d9c82f8dd90f0dc757a4c00afc0872fc1484 /drivers/dma
parent		3d0a8d10cfb4cc3d1877c29a866ee7d8a46aa2fa (diff)
parent		4598fc2c94b68740e0269db03c98a1e7ad5af773 (diff)
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (63 commits)
  dmaengine: mid_dma: mask_peripheral_interrupt only when dmac is idle
  dmaengine/ep93xx_dma: add module.h include
  pch_dma: Reduce wasting memory
  pch_dma: Fix suspend issue
  dma/timberdale: free_irq() on an error path
  dma: shdma: transfer based runtime PM
  dmaengine: shdma: protect against the IRQ handler
  dmaengine i.MX DMA/SDMA: add missing include of linux/module.h
  dmaengine: delete redundant chan_id and chancnt initialization in dma drivers
  dmaengine/amba-pl08x: Check txd->llis_va before freeing dma_pool
  dmaengine/amba-pl08x: Add support for sg len greater than one for slave transfers
  serial: sh-sci: don't filter on DMA device, use only channel ID
  ARM: SAMSUNG: Remove Samsung specific enum type for dma direction
  ASoC: Samsung: Update DMA interface
  spi/s3c64xx: Merge dma control code
  spi/s3c64xx: Add support DMA engine API
  ARM: SAMSUNG: Remove S3C-PL330-DMA driver
  ARM: S5P64X0: Use generic DMA PL330 driver
  ARM: S5PC100: Use generic DMA PL330 driver
  ARM: S5PV210: Use generic DMA PL330 driver
  ...

Fix up fairly trivial conflicts in
 - arch/arm/mach-exynos4/{Kconfig,clock.c}
 - arch/arm/mach-s5p64x0/dma.c
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/Kconfig	3
-rw-r--r--	drivers/dma/amba-pl08x.c	640
-rw-r--r--	drivers/dma/at_hdmac.c	164
-rw-r--r--	drivers/dma/at_hdmac_regs.h	24
-rw-r--r--	drivers/dma/dmatest.c	23
-rw-r--r--	drivers/dma/dw_dmac.c	5
-rw-r--r--	drivers/dma/ep93xx_dma.c	1
-rw-r--r--	drivers/dma/imx-dma.c	1
-rw-r--r--	drivers/dma/imx-sdma.c	48
-rw-r--r--	drivers/dma/intel_mid_dma.c	9
-rw-r--r--	drivers/dma/mpc512x_dma.c	1
-rw-r--r--	drivers/dma/mxs-dma.c	45
-rw-r--r--	drivers/dma/pch_dma.c	7
-rw-r--r--	drivers/dma/pl330.c	231
-rw-r--r--	drivers/dma/shdma.c	129
-rw-r--r--	drivers/dma/shdma.h	7
-rw-r--r--	drivers/dma/timb_dma.c	5
17 files changed, 864 insertions, 479 deletions
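The headline amba-pl08x change in this pull is scatter-gather support: device_prep_slave_sg() used to BUG() on sg_len != 1, and now builds one struct pl08x_sg per scatterlist entry (see the diff below). As a hedged, illustrative sketch only — the device, channel, and function names here are hypothetical and not part of the patch — this is roughly what a dmaengine slave client of that era could now do with a multi-entry scatterlist:

/*
 * Illustrative sketch only (not part of the patch): a hypothetical
 * dmaengine slave client queueing a multi-entry scatterlist. Before
 * this pull, pl08x's prep_slave_sg would BUG() unless nents == 1.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int queue_tx(struct device *dev, struct dma_chan *chan,
		    struct scatterlist *sgl, unsigned int nents)
{
	struct dma_async_tx_descriptor *desc;
	int mapped;

	/* Map the whole list for DMA; each entry becomes one pl08x_sg */
	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* One descriptor now covers every sg entry, not just the first */
	desc = chan->device->device_prep_slave_sg(chan, sgl, mapped,
			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
		return -EBUSY;
	}

	desc->tx_submit(desc);		/* queue it */
	dma_async_issue_pending(chan);	/* kick the engine */
	return 0;
}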
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 2e3b3d38c465..ab8f469f5cf8 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -193,7 +193,8 @@ config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 config PL330_DMA
 	tristate "DMA API Driver for PL330"
 	select DMA_ENGINE
-	depends on PL330
+	depends on ARM_AMBA
+	select PL330
 	help
 	  Select if your platform has one or more PL330 DMACs.
 	  You need to provide platform specific settings via
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index be21e3f138a8..b7cbd1ab1db1 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -66,32 +66,29 @@
  * after the final transfer signalled by LBREQ or LSREQ. The DMAC
  * will then move to the next LLI entry.
  *
- * Only the former works sanely with scatter lists, so we only implement
- * the DMAC flow control method. However, peripherals which use the LBREQ
- * and LSREQ signals (eg, MMCI) are unable to use this mode, which through
- * these hardware restrictions prevents them from using scatter DMA.
- *
  * Global TODO:
  * - Break out common code from arch/arm/mach-s3c64xx and share
  */
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/dmaengine.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/pl08x.h>
 #include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
-
+#include <linux/slab.h>
 #include <asm/hardware/pl080.h>
 
 #define DRIVER_NAME	"pl08xdmac"
 
+static struct amba_driver pl08x_amba_driver;
+
 /**
  * struct vendor_data - vendor-specific config parameters for PL08x derivatives
  * @channels: the number of channels available in this variant
@@ -126,7 +123,8 @@ struct pl08x_lli {
  * @phy_chans: array of data for the physical channels
  * @pool: a pool for the LLI descriptors
  * @pool_ctr: counter of LLIs in the pool
- * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches
+ * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
+ *	fetches
  * @mem_buses: set to indicate memory transfers on AHB2.
  * @lock: a spinlock for this struct
  */
@@ -149,14 +147,6 @@ struct pl08x_driver_data {
  * PL08X specific defines
  */
 
-/*
- * Memory boundaries: the manual for PL08x says that the controller
- * cannot read past a 1KiB boundary, so these defines are used to
- * create transfer LLIs that do not cross such boundaries.
- */
-#define PL08X_BOUNDARY_SHIFT		(10)	/* 1KB 0x400 */
-#define PL08X_BOUNDARY_SIZE		(1 << PL08X_BOUNDARY_SHIFT)
-
 /* Size (bytes) of each LLI buffer allocated for one transfer */
 # define PL08X_LLI_TSFR_SIZE	0x2000
 
@@ -272,7 +262,6 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
 	writel(val, ch->base + PL080_CH_CONFIG);
 }
 
-
 /*
  * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
  * clears any pending interrupt status. This should not be used for
@@ -363,7 +352,9 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 	if (!list_empty(&plchan->pend_list)) {
 		struct pl08x_txd *txdi;
 		list_for_each_entry(txdi, &plchan->pend_list, node) {
-			bytes += txdi->len;
+			struct pl08x_sg *dsg;
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				bytes += dsg->len;
 		}
 	}
 
@@ -407,6 +398,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
 		return NULL;
 	}
 
+	pm_runtime_get_sync(&pl08x->adev->dev);
 	return ch;
 }
 
@@ -420,6 +412,8 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
 	/* Stop the channel and clear its interrupts */
 	pl08x_terminate_phy_chan(pl08x, ch);
 
+	pm_runtime_put(&pl08x->adev->dev);
+
 	/* Mark it as free */
 	ch->serving = NULL;
 	spin_unlock_irqrestore(&ch->lock, flags);
@@ -499,36 +493,30 @@ struct pl08x_lli_build_data {
 };
 
 /*
- * Autoselect a master bus to use for the transfer this prefers the
- * destination bus if both available if fixed address on one bus the
- * other will be chosen
+ * Autoselect a master bus to use for the transfer. Slave will be the chosen as
+ * victim in case src & dest are not similarly aligned. i.e. If after aligning
+ * masters address with width requirements of transfer (by sending few byte by
+ * byte data), slave is still not aligned, then its width will be reduced to
+ * BYTE.
+ * - prefers the destination bus if both available
+ * - prefers bus with fixed address (i.e. peripheral)
  */
 static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
 	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
 {
 	if (!(cctl & PL080_CONTROL_DST_INCR)) {
-		*mbus = &bd->srcbus;
-		*sbus = &bd->dstbus;
-	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
 		*mbus = &bd->dstbus;
 		*sbus = &bd->srcbus;
+	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
+		*mbus = &bd->srcbus;
+		*sbus = &bd->dstbus;
 	} else {
-		if (bd->dstbus.buswidth == 4) {
+		if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
 			*mbus = &bd->dstbus;
 			*sbus = &bd->srcbus;
-		} else if (bd->srcbus.buswidth == 4) {
-			*mbus = &bd->srcbus;
-			*sbus = &bd->dstbus;
-		} else if (bd->dstbus.buswidth == 2) {
-			*mbus = &bd->dstbus;
-			*sbus = &bd->srcbus;
-		} else if (bd->srcbus.buswidth == 2) {
+		} else {
 			*mbus = &bd->srcbus;
 			*sbus = &bd->dstbus;
-		} else {
-			/* bd->srcbus.buswidth == 1 */
-			*mbus = &bd->dstbus;
-			*sbus = &bd->srcbus;
 		}
 	}
 }
@@ -547,7 +535,8 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
 	llis_va[num_llis].cctl = cctl;
 	llis_va[num_llis].src = bd->srcbus.addr;
 	llis_va[num_llis].dst = bd->dstbus.addr;
-	llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
+	llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
+		sizeof(struct pl08x_lli);
 	llis_va[num_llis].lli |= bd->lli_bus;
 
 	if (cctl & PL080_CONTROL_SRC_INCR)
@@ -560,16 +549,12 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
 	bd->remainder -= len;
 }
 
-/*
- * Return number of bytes to fill to boundary, or len.
- * This calculation works for any value of addr.
- */
-static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
+static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
+	u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
 {
-	size_t boundary_len = PL08X_BOUNDARY_SIZE -
-		(addr & (PL08X_BOUNDARY_SIZE - 1));
-
-	return min(boundary_len, len);
+	*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
+	pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
+	(*total_bytes) += len;
 }
 
 /*
@@ -583,13 +568,12 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 	struct pl08x_bus_data *mbus, *sbus;
 	struct pl08x_lli_build_data bd;
 	int num_llis = 0;
-	u32 cctl;
-	size_t max_bytes_per_lli;
-	size_t total_bytes = 0;
+	u32 cctl, early_bytes = 0;
+	size_t max_bytes_per_lli, total_bytes;
 	struct pl08x_lli *llis_va;
+	struct pl08x_sg *dsg;
 
-	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
-			&txd->llis_bus);
+	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
 	if (!txd->llis_va) {
 		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
 		return 0;
@@ -597,13 +581,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 
 	pl08x->pool_ctr++;
 
-	/* Get the default CCTL */
-	cctl = txd->cctl;
-
 	bd.txd = txd;
-	bd.srcbus.addr = txd->src_addr;
-	bd.dstbus.addr = txd->dst_addr;
 	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
+	cctl = txd->cctl;
 
 	/* Find maximum width of the source bus */
 	bd.srcbus.maxwidth =
@@ -615,215 +595,179 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
 			PL080_CONTROL_DWIDTH_SHIFT);
 
-	/* Set up the bus widths to the maximum */
-	bd.srcbus.buswidth = bd.srcbus.maxwidth;
-	bd.dstbus.buswidth = bd.dstbus.maxwidth;
+	list_for_each_entry(dsg, &txd->dsg_list, node) {
+		total_bytes = 0;
+		cctl = txd->cctl;
 
-	/*
-	 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
-	 */
-	max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
-		PL080_CONTROL_TRANSFER_SIZE_MASK;
+		bd.srcbus.addr = dsg->src_addr;
+		bd.dstbus.addr = dsg->dst_addr;
+		bd.remainder = dsg->len;
+		bd.srcbus.buswidth = bd.srcbus.maxwidth;
+		bd.dstbus.buswidth = bd.dstbus.maxwidth;
 
-	/* We need to count this down to zero */
-	bd.remainder = txd->len;
+		pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
 
-	/*
-	 * Choose bus to align to
-	 * - prefers destination bus if both available
-	 * - if fixed address on one bus chooses other
-	 */
-	pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
-
-	dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n",
-		bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
-		bd.srcbus.buswidth,
-		bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
-		bd.dstbus.buswidth,
-		bd.remainder, max_bytes_per_lli);
-	dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
-		mbus == &bd.srcbus ? "src" : "dst",
-		sbus == &bd.srcbus ? "src" : "dst");
-
-	if (txd->len < mbus->buswidth) {
-		/* Less than a bus width available - send as single bytes */
-		while (bd.remainder) {
-			dev_vdbg(&pl08x->adev->dev,
-				"%s single byte LLIs for a transfer of "
-				"less than a bus width (remain 0x%08x)\n",
-				__func__, bd.remainder);
-			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
-			total_bytes++;
-		}
-	} else {
-		/* Make one byte LLIs until master bus is aligned */
-		while ((mbus->addr) % (mbus->buswidth)) {
-			dev_vdbg(&pl08x->adev->dev,
-				"%s adjustment lli for less than bus width "
-				"(remain 0x%08x)\n",
-				__func__, bd.remainder);
-			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
-			total_bytes++;
-		}
+		dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
+			bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
+			bd.srcbus.buswidth,
+			bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
+			bd.dstbus.buswidth,
+			bd.remainder);
+		dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
+			mbus == &bd.srcbus ? "src" : "dst",
+			sbus == &bd.srcbus ? "src" : "dst");
 
 		/*
-		 * Master now aligned
-		 * - if slave is not then we must set its width down
+		 * Zero length is only allowed if all these requirements are
+		 * met:
+		 * - flow controller is peripheral.
+		 * - src.addr is aligned to src.width
+		 * - dst.addr is aligned to dst.width
+		 *
+		 * sg_len == 1 should be true, as there can be two cases here:
+		 *
+		 * - Memory addresses are contiguous and are not scattered.
+		 *   Here, Only one sg will be passed by user driver, with
+		 *   memory address and zero length. We pass this to controller
+		 *   and after the transfer it will receive the last burst
+		 *   request from peripheral and so transfer finishes.
+		 *
+		 * - Memory addresses are scattered and are not contiguous.
+		 *   Here, Obviously as DMA controller doesn't know when a lli's
+		 *   transfer gets over, it can't load next lli. So in this
+		 *   case, there has to be an assumption that only one lli is
+		 *   supported. Thus, we can't have scattered addresses.
 		 */
-		if (sbus->addr % sbus->buswidth) {
-			dev_dbg(&pl08x->adev->dev,
-				"%s set down bus width to one byte\n",
-				__func__);
+		if (!bd.remainder) {
+			u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
+				PL080_CONFIG_FLOW_CONTROL_SHIFT;
+			if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
+					(fc <= PL080_FLOW_SRC2DST_SRC))) {
+				dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
+					__func__);
+				return 0;
+			}
+
+			if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
+					(bd.srcbus.addr % bd.srcbus.buswidth)) {
+				dev_err(&pl08x->adev->dev,
+					"%s src & dst address must be aligned to src"
+					" & dst width if peripheral is flow controller",
+					__func__);
+				return 0;
+			}
 
-			sbus->buswidth = 1;
+			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
+					bd.dstbus.buswidth, 0);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
+			break;
 		}
 
 		/*
-		 * Make largest possible LLIs until less than one bus
-		 * width left
+		 * Send byte by byte for following cases
+		 * - Less than a bus width available
+		 * - until master bus is aligned
 		 */
-		while (bd.remainder > (mbus->buswidth - 1)) {
-			size_t lli_len, target_len, tsize, odd_bytes;
+		if (bd.remainder < mbus->buswidth)
+			early_bytes = bd.remainder;
+		else if ((mbus->addr) % (mbus->buswidth)) {
+			early_bytes = mbus->buswidth - (mbus->addr) %
+				(mbus->buswidth);
+			if ((bd.remainder - early_bytes) < mbus->buswidth)
+				early_bytes = bd.remainder;
+		}
 
+		if (early_bytes) {
+			dev_vdbg(&pl08x->adev->dev,
+				"%s byte width LLIs (remain 0x%08x)\n",
+				__func__, bd.remainder);
+			prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
+				&total_bytes);
+		}
+
+		if (bd.remainder) {
 			/*
-			 * If enough left try to send max possible,
-			 * otherwise try to send the remainder
+			 * Master now aligned
+			 * - if slave is not then we must set its width down
 			 */
-			target_len = min(bd.remainder, max_bytes_per_lli);
+			if (sbus->addr % sbus->buswidth) {
+				dev_dbg(&pl08x->adev->dev,
+					"%s set down bus width to one byte\n",
+					__func__);
+
+				sbus->buswidth = 1;
+			}
 
 			/*
-			 * Set bus lengths for incrementing buses to the
-			 * number of bytes which fill to next memory boundary,
-			 * limiting on the target length calculated above.
+			 * Bytes transferred = tsize * src width, not
+			 * MIN(buswidths)
 			 */
-			if (cctl & PL080_CONTROL_SRC_INCR)
-				bd.srcbus.fill_bytes =
-					pl08x_pre_boundary(bd.srcbus.addr,
-						target_len);
-			else
-				bd.srcbus.fill_bytes = target_len;
-
-			if (cctl & PL080_CONTROL_DST_INCR)
-				bd.dstbus.fill_bytes =
-					pl08x_pre_boundary(bd.dstbus.addr,
-						target_len);
-			else
-				bd.dstbus.fill_bytes = target_len;
-
-			/* Find the nearest */
-			lli_len = min(bd.srcbus.fill_bytes,
-				bd.dstbus.fill_bytes);
-
-			BUG_ON(lli_len > bd.remainder);
-
-			if (lli_len <= 0) {
-				dev_err(&pl08x->adev->dev,
-					"%s lli_len is %zu, <= 0\n",
-					__func__, lli_len);
-				return 0;
-			}
+			max_bytes_per_lli = bd.srcbus.buswidth *
+				PL080_CONTROL_TRANSFER_SIZE_MASK;
+			dev_vdbg(&pl08x->adev->dev,
+				"%s max bytes per lli = %zu\n",
+				__func__, max_bytes_per_lli);
+
+			/*
+			 * Make largest possible LLIs until less than one bus
+			 * width left
+			 */
+			while (bd.remainder > (mbus->buswidth - 1)) {
+				size_t lli_len, tsize, width;
 
-			if (lli_len == target_len) {
-				/*
-				 * Can send what we wanted.
-				 * Maintain alignment
-				 */
-				lli_len = (lli_len/mbus->buswidth) *
-					mbus->buswidth;
-				odd_bytes = 0;
-			} else {
 				/*
-				 * So now we know how many bytes to transfer
-				 * to get to the nearest boundary. The next
-				 * LLI will past the boundary. However, we
-				 * may be working to a boundary on the slave
-				 * bus. We need to ensure the master stays
-				 * aligned, and that we are working in
-				 * multiples of the bus widths.
+				 * If enough left try to send max possible,
+				 * otherwise try to send the remainder
 				 */
-				odd_bytes = lli_len % mbus->buswidth;
-				lli_len -= odd_bytes;
-
-			}
+				lli_len = min(bd.remainder, max_bytes_per_lli);
 
-			if (lli_len) {
 				/*
-				 * Check against minimum bus alignment:
-				 * Calculate actual transfer size in relation
-				 * to bus width an get a maximum remainder of
-				 * the smallest bus width - 1
+				 * Check against maximum bus alignment:
+				 * Calculate actual transfer size in relation to
+				 * bus width an get a maximum remainder of the
+				 * highest bus width - 1
 				 */
-				/* FIXME: use round_down()? */
-				tsize = lli_len / min(mbus->buswidth,
-						      sbus->buswidth);
-				lli_len = tsize * min(mbus->buswidth,
-						      sbus->buswidth);
-
-				if (target_len != lli_len) {
-					dev_vdbg(&pl08x->adev->dev,
-					"%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
-					__func__, target_len, lli_len, txd->len);
-				}
-
-				cctl = pl08x_cctl_bits(cctl,
-						       bd.srcbus.buswidth,
-						       bd.dstbus.buswidth,
-						       tsize);
+				width = max(mbus->buswidth, sbus->buswidth);
+				lli_len = (lli_len / width) * width;
+				tsize = lli_len / bd.srcbus.buswidth;
 
 				dev_vdbg(&pl08x->adev->dev,
-					"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
+					"%s fill lli with single lli chunk of "
+					"size 0x%08zx (remainder 0x%08zx)\n",
 					__func__, lli_len, bd.remainder);
+
+				cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
+					bd.dstbus.buswidth, tsize);
 				pl08x_fill_lli_for_desc(&bd, num_llis++,
 						lli_len, cctl);
 				total_bytes += lli_len;
 			}
 
-
-			if (odd_bytes) {
-				/*
-				 * Creep past the boundary, maintaining
-				 * master alignment
-				 */
-				int j;
-				for (j = 0; (j < mbus->buswidth)
-						&& (bd.remainder); j++) {
-					cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-					dev_vdbg(&pl08x->adev->dev,
-						"%s align with boundary, single byte (remain 0x%08zx)\n",
-						__func__, bd.remainder);
-					pl08x_fill_lli_for_desc(&bd,
-						num_llis++, 1, cctl);
-					total_bytes++;
-				}
+			/*
+			 * Send any odd bytes
+			 */
+			if (bd.remainder) {
+				dev_vdbg(&pl08x->adev->dev,
+					"%s align with boundary, send odd bytes (remain %zu)\n",
+					__func__, bd.remainder);
+				prep_byte_width_lli(&bd, &cctl, bd.remainder,
+						num_llis++, &total_bytes);
 			}
 		}
 
-	/*
-	 * Send any odd bytes
-	 */
-	while (bd.remainder) {
-		cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-		dev_vdbg(&pl08x->adev->dev,
-			"%s align with boundary, single odd byte (remain %zu)\n",
-			__func__, bd.remainder);
-		pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
-		total_bytes++;
+		if (total_bytes != dsg->len) {
+			dev_err(&pl08x->adev->dev,
+				"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
+				__func__, total_bytes, dsg->len);
+			return 0;
 		}
-	}
-	if (total_bytes != txd->len) {
-		dev_err(&pl08x->adev->dev,
-			"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
-			__func__, total_bytes, txd->len);
-		return 0;
-	}
 
-	if (num_llis >= MAX_NUM_TSFR_LLIS) {
-		dev_err(&pl08x->adev->dev,
-			"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
-			__func__, (u32) MAX_NUM_TSFR_LLIS);
-		return 0;
+		if (num_llis >= MAX_NUM_TSFR_LLIS) {
+			dev_err(&pl08x->adev->dev,
+				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
+				__func__, (u32) MAX_NUM_TSFR_LLIS);
+			return 0;
+		}
 	}
 
 	llis_va = txd->llis_va;
@@ -856,11 +800,19 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 		struct pl08x_txd *txd)
 {
+	struct pl08x_sg *dsg, *_dsg;
+
 	/* Free the LLI */
-	dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
+	if (txd->llis_va)
+		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
 
 	pl08x->pool_ctr--;
 
+	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
+		list_del(&dsg->node);
+		kfree(dsg);
+	}
+
 	kfree(txd);
 }
 
@@ -917,9 +869,7 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 	 * need, but for slaves the physical signals may be muxed!
 	 * Can the platform allow us to use this channel?
 	 */
-	if (plchan->slave &&
-	    ch->signal < 0 &&
-	    pl08x->pd->get_signal) {
+	if (plchan->slave && pl08x->pd->get_signal) {
 		ret = pl08x->pd->get_signal(plchan);
 		if (ret < 0) {
 			dev_dbg(&pl08x->adev->dev,
@@ -1008,10 +958,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
  * If slaves are relying on interrupts to signal completion this function
  * must not be called with interrupts disabled.
  */
-static enum dma_status
-pl08x_dma_tx_status(struct dma_chan *chan,
-		dma_cookie_t cookie,
-		struct dma_tx_state *txstate)
+static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
+		dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	dma_cookie_t last_used;
@@ -1253,7 +1201,9 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 
 	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
 	if (!num_llis) {
-		kfree(txd);
+		spin_lock_irqsave(&plchan->lock, flags);
+		pl08x_free_txd(pl08x, txd);
+		spin_unlock_irqrestore(&plchan->lock, flags);
 		return -EINVAL;
 	}
 
@@ -1301,13 +1251,14 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
 		unsigned long flags)
 {
-	struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
 
 	if (txd) {
 		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
 		txd->tx.flags = flags;
 		txd->tx.tx_submit = pl08x_tx_submit;
 		INIT_LIST_HEAD(&txd->node);
+		INIT_LIST_HEAD(&txd->dsg_list);
 
 		/* Always enable error and terminal interrupts */
 		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
@@ -1326,6 +1277,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_txd *txd;
+	struct pl08x_sg *dsg;
 	int ret;
 
 	txd = pl08x_get_txd(plchan, flags);
@@ -1335,10 +1287,19 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 		return NULL;
 	}
 
+	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+	if (!dsg) {
+		pl08x_free_txd(pl08x, txd);
+		dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
+				__func__);
+		return NULL;
+	}
+	list_add_tail(&dsg->node, &txd->dsg_list);
+
 	txd->direction = DMA_NONE;
-	txd->src_addr = src;
-	txd->dst_addr = dest;
-	txd->len = len;
+	dsg->src_addr = src;
+	dsg->dst_addr = dest;
+	dsg->len = len;
 
 	/* Set platform data for m2m */
 	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
@@ -1367,19 +1328,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_txd *txd;
-	int ret;
-
-	/*
-	 * Current implementation ASSUMES only one sg
-	 */
-	if (sg_len != 1) {
-		dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
-			__func__);
-		BUG();
-	}
+	struct pl08x_sg *dsg;
+	struct scatterlist *sg;
+	dma_addr_t slave_addr;
+	int ret, tmp;
 
 	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
 			__func__, sgl->length, plchan->name);
 
 	txd = pl08x_get_txd(plchan, flags);
 	if (!txd) {
@@ -1398,24 +1353,49 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	 * channel target address dynamically at runtime.
 	 */
 	txd->direction = direction;
-	txd->len = sgl->length;
 
 	if (direction == DMA_TO_DEVICE) {
-		txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 		txd->cctl = plchan->dst_cctl;
-		txd->src_addr = sgl->dma_address;
-		txd->dst_addr = plchan->dst_addr;
+		slave_addr = plchan->dst_addr;
 	} else if (direction == DMA_FROM_DEVICE) {
-		txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 		txd->cctl = plchan->src_cctl;
-		txd->src_addr = plchan->src_addr;
-		txd->dst_addr = sgl->dma_address;
+		slave_addr = plchan->src_addr;
 	} else {
+		pl08x_free_txd(pl08x, txd);
 		dev_err(&pl08x->adev->dev,
 			"%s direction unsupported\n", __func__);
 		return NULL;
 	}
 
+	if (plchan->cd->device_fc)
+		tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
+				PL080_FLOW_PER2MEM_PER;
+	else
+		tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
+				PL080_FLOW_PER2MEM;
+
+	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+
+	for_each_sg(sgl, sg, sg_len, tmp) {
+		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+		if (!dsg) {
+			pl08x_free_txd(pl08x, txd);
+			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
+					__func__);
+			return NULL;
+		}
+		list_add_tail(&dsg->node, &txd->dsg_list);
+
+		dsg->len = sg_dma_len(sg);
+		if (direction == DMA_TO_DEVICE) {
+			dsg->src_addr = sg_phys(sg);
+			dsg->dst_addr = slave_addr;
+		} else {
+			dsg->src_addr = slave_addr;
+			dsg->dst_addr = sg_phys(sg);
+		}
+	}
+
 	ret = pl08x_prep_channel_resources(plchan, txd);
 	if (ret)
 		return NULL;
@@ -1489,9 +1469,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
 {
-	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	struct pl08x_dma_chan *plchan;
 	char *name = chan_id;
 
+	/* Reject channels for devices not bound to this driver */
+	if (chan->device->dev->driver != &pl08x_amba_driver.drv)
+		return false;
+
+	plchan = to_pl08x_chan(chan);
+
 	/* Check that the channel is not taken! */
 	if (!strcmp(plchan->name, name))
 		return true;
@@ -1507,34 +1493,34 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
  */
 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
 {
-	u32 val;
-
-	val = readl(pl08x->base + PL080_CONFIG);
-	val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
-	/* We implicitly clear bit 1 and that means little-endian mode */
-	val |= PL080_CONFIG_ENABLE;
-	writel(val, pl08x->base + PL080_CONFIG);
+	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
 }
 
 static void pl08x_unmap_buffers(struct pl08x_txd *txd)
 {
 	struct device *dev = txd->tx.chan->device->dev;
+	struct pl08x_sg *dsg;
 
 	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
 		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-			dma_unmap_single(dev, txd->src_addr, txd->len,
-				DMA_TO_DEVICE);
-		else
-			dma_unmap_page(dev, txd->src_addr, txd->len,
-				DMA_TO_DEVICE);
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_single(dev, dsg->src_addr, dsg->len,
+						DMA_TO_DEVICE);
+		else {
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_page(dev, dsg->src_addr, dsg->len,
+						DMA_TO_DEVICE);
+		}
 	}
 	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
 		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-			dma_unmap_single(dev, txd->dst_addr, txd->len,
-				DMA_FROM_DEVICE);
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
+						DMA_FROM_DEVICE);
 		else
-			dma_unmap_page(dev, txd->dst_addr, txd->len,
-				DMA_FROM_DEVICE);
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
+						DMA_FROM_DEVICE);
 	}
 }
 
@@ -1589,8 +1575,8 @@ static void pl08x_tasklet(unsigned long data)
 	 */
 	list_for_each_entry(waiting, &pl08x->memcpy.channels,
 			    chan.device_node) {
-		if (waiting->state == PL08X_CHAN_WAITING &&
-			waiting->waiting != NULL) {
+		if (waiting->state == PL08X_CHAN_WAITING &&
+		    waiting->waiting != NULL) {
 			int ret;
 
 			/* This should REALLY not fail now */
@@ -1630,38 +1616,40 @@ static void pl08x_tasklet(unsigned long data)
 static irqreturn_t pl08x_irq(int irq, void *dev)
 {
 	struct pl08x_driver_data *pl08x = dev;
-	u32 mask = 0;
-	u32 val;
-	int i;
-
-	val = readl(pl08x->base + PL080_ERR_STATUS);
-	if (val) {
-		/* An error interrupt (on one or more channels) */
-		dev_err(&pl08x->adev->dev,
-			"%s error interrupt, register value 0x%08x\n",
-			__func__, val);
-		/*
-		 * Simply clear ALL PL08X error interrupts,
-		 * regardless of channel and cause
-		 * FIXME: should be 0x00000003 on PL081 really.
-		 */
-		writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
+	u32 mask = 0, err, tc, i;
+
+	/* check & clear - ERR & TC interrupts */
+	err = readl(pl08x->base + PL080_ERR_STATUS);
+	if (err) {
+		dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
+			__func__, err);
+		writel(err, pl08x->base + PL080_ERR_CLEAR);
 	}
-	val = readl(pl08x->base + PL080_INT_STATUS);
+	tc = readl(pl08x->base + PL080_INT_STATUS);
+	if (tc)
+		writel(tc, pl08x->base + PL080_TC_CLEAR);
+
+	if (!err && !tc)
+		return IRQ_NONE;
+
 	for (i = 0; i < pl08x->vd->channels; i++) {
-		if ((1 << i) & val) {
+		if (((1 << i) & err) || ((1 << i) & tc)) {
 			/* Locate physical channel */
 			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
 			struct pl08x_dma_chan *plchan = phychan->serving;
 
+			if (!plchan) {
+				dev_err(&pl08x->adev->dev,
+					"%s Error TC interrupt on unused channel: 0x%08x\n",
+					__func__, i);
+				continue;
+			}
+
 			/* Schedule tasklet on this channel */
 			tasklet_schedule(&plchan->tasklet);
-
 			mask |= (1 << i);
 		}
 	}
-	/* Clear only the terminal interrupts on channels we processed */
-	writel(mask, pl08x->base + PL080_TC_CLEAR);
 
 	return mask ? IRQ_HANDLED : IRQ_NONE;
 }
@@ -1685,9 +1673,7 @@ static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
  * Make a local wrapper to hold required data
  */
 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
-		struct dma_device *dmadev,
-		unsigned int channels,
-		bool slave)
+		struct dma_device *dmadev, unsigned int channels, bool slave)
 {
 	struct pl08x_dma_chan *chan;
 	int i;
@@ -1700,7 +1686,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 	 * to cope with that situation.
 	 */
 	for (i = 0; i < channels; i++) {
-		chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
+		chan = kzalloc(sizeof(*chan), GFP_KERNEL);
 		if (!chan) {
 			dev_err(&pl08x->adev->dev,
 				"%s no memory for channel\n", __func__);
@@ -1728,7 +1714,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 			kfree(chan);
 			continue;
 		}
-		dev_info(&pl08x->adev->dev,
+		dev_dbg(&pl08x->adev->dev,
 			 "initialize virtual channel \"%s\"\n",
 			 chan->name);
 
@@ -1837,9 +1823,9 @@ static const struct file_operations pl08x_debugfs_operations = {
 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
 {
 	/* Expose a simple debugfs interface to view all clocks */
-	(void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
-			NULL, pl08x,
-			&pl08x_debugfs_operations);
+	(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
+			S_IFREG | S_IRUGO, NULL, pl08x,
+			&pl08x_debugfs_operations);
 }
 
 #else
@@ -1860,12 +1846,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 		return ret;
 
 	/* Create the driver state holder */
-	pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
+	pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
 	if (!pl08x) {
 		ret = -ENOMEM;
 		goto out_no_pl08x;
 	}
 
+	pm_runtime_set_active(&adev->dev);
+	pm_runtime_enable(&adev->dev);
+
 	/* Initialize memcpy engine */
 	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
 	pl08x->memcpy.dev = &adev->dev;
@@ -1939,7 +1928,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	}
 
 	/* Initialize physical channels */
-	pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
+	pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
 			GFP_KERNEL);
 	if (!pl08x->phy_chans) {
 		dev_err(&adev->dev, "%s failed to allocate "
@@ -1956,9 +1945,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 		spin_lock_init(&ch->lock);
 		ch->serving = NULL;
 		ch->signal = -1;
-		dev_info(&adev->dev,
-			 "physical channel %d is %s\n", i,
-			 pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
+		dev_dbg(&adev->dev, "physical channel %d is %s\n",
+			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
 	}
 
 	/* Register as many memcpy channels as there are physical channels */
@@ -1974,8 +1962,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 
 	/* Register slave channels */
 	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
-			pl08x->pd->num_slave_channels,
-			true);
+			pl08x->pd->num_slave_channels, true);
 	if (ret <= 0) {
 		dev_warn(&pl08x->adev->dev,
 			"%s failed to enumerate slave channels - %d\n",
@@ -2005,6 +1992,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
 		 amba_part(adev), amba_rev(adev),
 		 (unsigned long long)adev->res.start, adev->irq[0]);
+
+	pm_runtime_put(&adev->dev);
 	return 0;
 
 out_no_slave_reg:
@@ -2023,6 +2012,9 @@ out_no_ioremap:
 	dma_pool_destroy(pl08x->pool);
 out_no_lli_pool:
 out_no_platdata:
+	pm_runtime_put(&adev->dev);
+	pm_runtime_disable(&adev->dev);
+
 	kfree(pl08x);
 out_no_pl08x:
 	amba_release_regions(adev);
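The at_hdmac.c changes that follow are dominated by two mechanical conversions: spin_lock_bh()/spin_unlock_bh() become spin_lock_irqsave()/spin_unlock_irqrestore(), so the channel lock can be taken from contexts where interrupts may already be disabled, and open-coded test_bit() checks become atc_chan_is_cyclic()/atc_chan_is_paused() helpers. A hedged sketch of the locking pattern follows; "my_chan" is a hypothetical stand-in for struct at_dma_chan, not code from the patch.

/*
 * Minimal sketch of the conversion, under the assumption stated
 * above; my_chan is hypothetical (its lock would be set up with
 * spin_lock_init() at probe time).
 */
#include <linux/spinlock.h>

struct my_chan {
	spinlock_t	lock;
	int		descs_allocated;
};

static void my_chan_account(struct my_chan *c)
{
	unsigned long flags;

	/* was: spin_lock_bh(&c->lock) - unsafe if IRQs are already off */
	spin_lock_irqsave(&c->lock, flags);
	c->descs_allocated++;
	spin_unlock_irqrestore(&c->lock, flags);
}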
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 6a483eac7b3f..fcfa0a8b5c59 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -107,10 +107,11 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
 {
 	struct at_desc *desc, *_desc;
 	struct at_desc *ret = NULL;
+	unsigned long flags;
 	unsigned int i = 0;
 	LIST_HEAD(tmp_list);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
 		i++;
 		if (async_tx_test_ack(&desc->txd)) {
@@ -121,7 +122,7 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
 		dev_dbg(chan2dev(&atchan->chan_common),
 				"desc %p not ACKed\n", desc);
 	}
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"scanned %u descriptors on freelist\n", i);
 
@@ -129,9 +130,9 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
 	if (!ret) {
 		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
 		if (ret) {
-			spin_lock_bh(&atchan->lock);
+			spin_lock_irqsave(&atchan->lock, flags);
 			atchan->descs_allocated++;
-			spin_unlock_bh(&atchan->lock);
+			spin_unlock_irqrestore(&atchan->lock, flags);
 		} else {
 			dev_err(chan2dev(&atchan->chan_common),
 					"not enough descriptors available\n");
@@ -150,8 +151,9 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
 {
 	if (desc) {
 		struct at_desc *child;
+		unsigned long flags;
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);
 		list_for_each_entry(child, &desc->tx_list, desc_node)
 			dev_vdbg(chan2dev(&atchan->chan_common),
 					"moving child desc %p to freelist\n",
@@ -160,7 +162,7 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
 		dev_vdbg(chan2dev(&atchan->chan_common),
 				"moving desc %p to freelist\n", desc);
 		list_add(&desc->desc_node, &atchan->free_list);
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	}
 }
 
@@ -299,7 +301,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 
 	/* for cyclic transfers,
 	 * no need to replay callback function while stopping */
-	if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
+	if (!atc_chan_is_cyclic(atchan)) {
 		dma_async_tx_callback	callback = txd->callback;
 		void			*param = txd->callback_param;
 
@@ -471,16 +473,17 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
 static void atc_tasklet(unsigned long data)
 {
 	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
+	unsigned long flags;
 
-	spin_lock(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
 		atc_handle_error(atchan);
-	else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+	else if (atc_chan_is_cyclic(atchan))
 		atc_handle_cyclic(atchan);
 	else
 		atc_advance_work(atchan);
 
-	spin_unlock(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@@ -539,8 +542,9 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct at_desc		*desc = txd_to_at_desc(tx);
 	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
 	dma_cookie_t		cookie;
+	unsigned long		flags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	cookie = atc_assign_cookie(atchan, desc);
 
 	if (list_empty(&atchan->active_list)) {
@@ -554,7 +558,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
 		list_add_tail(&desc->desc_node, &atchan->queue);
 	}
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return cookie;
 }
@@ -927,28 +931,29 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	struct at_dma		*atdma = to_at_dma(chan->device);
 	int			chan_id = atchan->chan_common.chan_id;
+	unsigned long		flags;
 
 	LIST_HEAD(list);
 
 	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
 
 	if (cmd == DMA_PAUSE) {
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);
 
 		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
 		set_bit(ATC_IS_PAUSED, &atchan->status);
 
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	} else if (cmd == DMA_RESUME) {
-		if (!test_bit(ATC_IS_PAUSED, &atchan->status))
+		if (!atc_chan_is_paused(atchan))
 			return 0;
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);
 
 		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
 		clear_bit(ATC_IS_PAUSED, &atchan->status);
 
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	} else if (cmd == DMA_TERMINATE_ALL) {
 		struct at_desc	*desc, *_desc;
 		/*
@@ -957,7 +962,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		 * channel. We still have to poll the channel enable bit due
 		 * to AHB/HSB limitations.
 		 */
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);
 
 		/* disabling channel: must also remove suspend state */
 		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
@@ -978,7 +983,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		/* if channel dedicated to cyclic operations, free it */
 		clear_bit(ATC_IS_CYCLIC, &atchan->status);
 
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	} else {
 		return -ENXIO;
 	}
@@ -1004,9 +1009,10 @@ atc_tx_status(struct dma_chan *chan,
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	dma_cookie_t		last_used;
 	dma_cookie_t		last_complete;
+	unsigned long		flags;
 	enum dma_status		ret;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	last_complete = atchan->completed_cookie;
 	last_used = chan->cookie;
@@ -1021,7 +1027,7 @@ atc_tx_status(struct dma_chan *chan,
 		ret = dma_async_is_complete(cookie, last_complete, last_used);
 	}
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	if (ret != DMA_SUCCESS)
 		dma_set_tx_state(txstate, last_complete, last_used,
@@ -1029,7 +1035,7 @@ atc_tx_status(struct dma_chan *chan,
 	else
 		dma_set_tx_state(txstate, last_complete, last_used, 0);
 
-	if (test_bit(ATC_IS_PAUSED, &atchan->status))
+	if (atc_chan_is_paused(atchan))
 		ret = DMA_PAUSED;
 
 	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
@@ -1046,18 +1052,19 @@ atc_tx_status(struct dma_chan *chan,
 static void atc_issue_pending(struct dma_chan *chan)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	unsigned long		flags;
 
 	dev_vdbg(chan2dev(chan), "issue_pending\n");
 
 	/* Not needed for cyclic transfers */
-	if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+	if (atc_chan_is_cyclic(atchan))
 		return;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	if (!atc_chan_is_enabled(atchan)) {
 		atc_advance_work(atchan);
 	}
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 /**
@@ -1073,6 +1080,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
 	struct at_dma		*atdma = to_at_dma(chan->device);
 	struct at_desc		*desc;
 	struct at_dma_slave	*atslave;
+	unsigned long		flags;
 	int			i;
 	u32			cfg;
 	LIST_HEAD(tmp_list);
@@ -1116,11 +1124,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
1116 list_add_tail(&desc->desc_node, &tmp_list); 1124 list_add_tail(&desc->desc_node, &tmp_list);
1117 } 1125 }
1118 1126
1119 spin_lock_bh(&atchan->lock); 1127 spin_lock_irqsave(&atchan->lock, flags);
1120 atchan->descs_allocated = i; 1128 atchan->descs_allocated = i;
1121 list_splice(&tmp_list, &atchan->free_list); 1129 list_splice(&tmp_list, &atchan->free_list);
1122 atchan->completed_cookie = chan->cookie = 1; 1130 atchan->completed_cookie = chan->cookie = 1;
1123 spin_unlock_bh(&atchan->lock); 1131 spin_unlock_irqrestore(&atchan->lock, flags);
1124 1132
1125 /* channel parameters */ 1133 /* channel parameters */
1126 channel_writel(atchan, CFG, cfg); 1134 channel_writel(atchan, CFG, cfg);
@@ -1260,12 +1268,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
1260 1268
1261 /* initialize channels related values */ 1269 /* initialize channels related values */
1262 INIT_LIST_HEAD(&atdma->dma_common.channels); 1270 INIT_LIST_HEAD(&atdma->dma_common.channels);
1263 for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) { 1271 for (i = 0; i < pdata->nr_channels; i++) {
1264 struct at_dma_chan *atchan = &atdma->chan[i]; 1272 struct at_dma_chan *atchan = &atdma->chan[i];
1265 1273
1266 atchan->chan_common.device = &atdma->dma_common; 1274 atchan->chan_common.device = &atdma->dma_common;
1267 atchan->chan_common.cookie = atchan->completed_cookie = 1; 1275 atchan->chan_common.cookie = atchan->completed_cookie = 1;
1268 atchan->chan_common.chan_id = i;
1269 list_add_tail(&atchan->chan_common.device_node, 1276 list_add_tail(&atchan->chan_common.device_node,
1270 &atdma->dma_common.channels); 1277 &atdma->dma_common.channels);
1271 1278
@@ -1293,22 +1300,20 @@ static int __init at_dma_probe(struct platform_device *pdev)
1293 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) 1300 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1294 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; 1301 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1295 1302
1296 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) 1303 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1297 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; 1304 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1298 1305 /* controller can do slave DMA: can trigger cyclic transfers */
1299 if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask)) 1306 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
1300 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; 1307 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1301
1302 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
1303 dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
1304 atdma->dma_common.device_control = atc_control; 1308 atdma->dma_common.device_control = atc_control;
1309 }
1305 1310
1306 dma_writel(atdma, EN, AT_DMA_ENABLE); 1311 dma_writel(atdma, EN, AT_DMA_ENABLE);
1307 1312
1308 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n", 1313 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
1309 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", 1314 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1310 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", 1315 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
1311 atdma->dma_common.chancnt); 1316 pdata->nr_channels);
1312 1317
1313 dma_async_device_register(&atdma->dma_common); 1318 dma_async_device_register(&atdma->dma_common);
1314 1319
@@ -1377,27 +1382,112 @@ static void at_dma_shutdown(struct platform_device *pdev)
1377 clk_disable(atdma->clk); 1382 clk_disable(atdma->clk);
1378} 1383}
1379 1384
1385static int at_dma_prepare(struct device *dev)
1386{
1387 struct platform_device *pdev = to_platform_device(dev);
1388 struct at_dma *atdma = platform_get_drvdata(pdev);
1389 struct dma_chan *chan, *_chan;
1390
1391 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1392 device_node) {
1393 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1394 /* wait for transaction completion (except in cyclic case) */
1395 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
1396 return -EAGAIN;
1397 }
1398 return 0;
1399}
1400
1401static void atc_suspend_cyclic(struct at_dma_chan *atchan)
1402{
1403 struct dma_chan *chan = &atchan->chan_common;
1404
1405 /* Channel should have been paused by the channel user;
1406 * do it here anyway if that has not been done already */
1407 if (!atc_chan_is_paused(atchan)) {
1408 dev_warn(chan2dev(chan),
1409 "cyclic channel not paused, should be done by channel user\n");
1410 atc_control(chan, DMA_PAUSE, 0);
1411 }
1412
1413 /* now preserve additional data for cyclic operations */
1414 /* next descriptor address in the cyclic list */
1415 atchan->save_dscr = channel_readl(atchan, DSCR);
1416
1417 vdbg_dump_regs(atchan);
1418}
1419
1380static int at_dma_suspend_noirq(struct device *dev) 1420static int at_dma_suspend_noirq(struct device *dev)
1381{ 1421{
1382 struct platform_device *pdev = to_platform_device(dev); 1422 struct platform_device *pdev = to_platform_device(dev);
1383 struct at_dma *atdma = platform_get_drvdata(pdev); 1423 struct at_dma *atdma = platform_get_drvdata(pdev);
1424 struct dma_chan *chan, *_chan;
1384 1425
1385 at_dma_off(platform_get_drvdata(pdev)); 1426 /* preserve data */
1427 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1428 device_node) {
1429 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1430
1431 if (atc_chan_is_cyclic(atchan))
1432 atc_suspend_cyclic(atchan);
1433 atchan->save_cfg = channel_readl(atchan, CFG);
1434 }
1435 atdma->save_imr = dma_readl(atdma, EBCIMR);
1436
1437 /* disable DMA controller */
1438 at_dma_off(atdma);
1386 clk_disable(atdma->clk); 1439 clk_disable(atdma->clk);
1387 return 0; 1440 return 0;
1388} 1441}
1389 1442
1443static void atc_resume_cyclic(struct at_dma_chan *atchan)
1444{
1445 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
1446
1447 /* restore channel status for the cyclic descriptor list:
1448 * resume from the descriptor that was next at suspend time */
1449 channel_writel(atchan, SADDR, 0);
1450 channel_writel(atchan, DADDR, 0);
1451 channel_writel(atchan, CTRLA, 0);
1452 channel_writel(atchan, CTRLB, 0);
1453 channel_writel(atchan, DSCR, atchan->save_dscr);
1454 dma_writel(atdma, CHER, atchan->mask);
1455
1456 /* the channel pause status should be cleared by the channel user;
1457 * we cannot take the initiative to do it here */
1458
1459 vdbg_dump_regs(atchan);
1460}
1461
1390static int at_dma_resume_noirq(struct device *dev) 1462static int at_dma_resume_noirq(struct device *dev)
1391{ 1463{
1392 struct platform_device *pdev = to_platform_device(dev); 1464 struct platform_device *pdev = to_platform_device(dev);
1393 struct at_dma *atdma = platform_get_drvdata(pdev); 1465 struct at_dma *atdma = platform_get_drvdata(pdev);
1466 struct dma_chan *chan, *_chan;
1394 1467
1468 /* bring back DMA controller */
1395 clk_enable(atdma->clk); 1469 clk_enable(atdma->clk);
1396 dma_writel(atdma, EN, AT_DMA_ENABLE); 1470 dma_writel(atdma, EN, AT_DMA_ENABLE);
1471
1472 /* clear any pending interrupt */
1473 while (dma_readl(atdma, EBCISR))
1474 cpu_relax();
1475
1476 /* restore saved data */
1477 dma_writel(atdma, EBCIER, atdma->save_imr);
1478 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1479 device_node) {
1480 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1481
1482 channel_writel(atchan, CFG, atchan->save_cfg);
1483 if (atc_chan_is_cyclic(atchan))
1484 atc_resume_cyclic(atchan);
1485 }
1397 return 0; 1486 return 0;
1398} 1487}
1399 1488
1400static const struct dev_pm_ops at_dma_dev_pm_ops = { 1489static const struct dev_pm_ops at_dma_dev_pm_ops = {
1490 .prepare = at_dma_prepare,
1401 .suspend_noirq = at_dma_suspend_noirq, 1491 .suspend_noirq = at_dma_suspend_noirq,
1402 .resume_noirq = at_dma_resume_noirq, 1492 .resume_noirq = at_dma_resume_noirq,
1403}; 1493};
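Taken together, the new at_hdmac callbacks implement a common system-sleep shape for a DMA controller: veto suspend while one-shot transfers are in flight, save per-channel state late in the noirq phase, and restore it before new work can be issued. A condensed sketch under those assumptions; the foo_dma_* helpers are placeholders, not driver API:

#include <linux/pm.h>

static int foo_dma_prepare(struct device *dev)
{
	/* refuse to suspend while a non-cyclic transfer is running */
	return foo_dma_busy(dev) ? -EAGAIN : 0;
}

static int foo_dma_suspend_noirq(struct device *dev)
{
	foo_dma_save_regs(dev);		/* per-channel CFG, cyclic DSCR, IMR */
	foo_dma_off(dev);
	return 0;
}

static int foo_dma_resume_noirq(struct device *dev)
{
	foo_dma_on(dev);
	foo_dma_restore_regs(dev);	/* mirror of the save step */
	return 0;
}

static const struct dev_pm_ops foo_dma_pm_ops = {
	.prepare	= foo_dma_prepare,
	.suspend_noirq	= foo_dma_suspend_noirq,
	.resume_noirq	= foo_dma_resume_noirq,
};

Returning -EAGAIN from .prepare aborts the suspend cleanly, which is why cyclic channels are exempted from the busy check: they would otherwise block suspend forever.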
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 087dbf1dd39c..aa4c9aebab7c 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -204,6 +204,9 @@ enum atc_status {
204 * @status: transmit status information from irq/prep* functions 204 * @status: transmit status information from irq/prep* functions
205 * to tasklet (use atomic operations) 205 * to tasklet (use atomic operations)
206 * @tasklet: bottom half to finish transaction work 206 * @tasklet: bottom half to finish transaction work
207 * @save_cfg: configuration register that is saved on suspend/resume cycle
208 * @save_dscr: for cyclic operations, preserve next descriptor address in
209 * the cyclic list on suspend/resume cycle
207 * @lock: serializes enqueue/dequeue operations to descriptors lists 210 * @lock: serializes enqueue/dequeue operations to descriptors lists
208 * @completed_cookie: identifier for the most recently completed operation 211 * @completed_cookie: identifier for the most recently completed operation
209 * @active_list: list of descriptors dmaengine is currently running on 212 * @active_list: list of descriptors dmaengine is currently running on
@@ -218,6 +221,8 @@ struct at_dma_chan {
218 u8 mask; 221 u8 mask;
219 unsigned long status; 222 unsigned long status;
220 struct tasklet_struct tasklet; 223 struct tasklet_struct tasklet;
224 u32 save_cfg;
225 u32 save_dscr;
221 226
222 spinlock_t lock; 227 spinlock_t lock;
223 228
@@ -248,6 +253,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
248 * @chan_common: common dmaengine dma_device object members 253 * @chan_common: common dmaengine dma_device object members
249 * @ch_regs: memory mapped register base 254 * @ch_regs: memory mapped register base
250 * @clk: dma controller clock 255 * @clk: dma controller clock
256 * @save_imr: interrupt mask register that is saved on suspend/resume cycle
251 * @all_chan_mask: all channels available in a mask 257 * @all_chan_mask: all channels available in a mask
252 * @dma_desc_pool: base of DMA descriptor region (DMA address) 258 * @dma_desc_pool: base of DMA descriptor region (DMA address)
253 * @chan: channels table to store at_dma_chan structures 259 * @chan: channels table to store at_dma_chan structures
@@ -256,6 +262,7 @@ struct at_dma {
256 struct dma_device dma_common; 262 struct dma_device dma_common;
257 void __iomem *regs; 263 void __iomem *regs;
258 struct clk *clk; 264 struct clk *clk;
265 u32 save_imr;
259 266
260 u8 all_chan_mask; 267 u8 all_chan_mask;
261 268
@@ -355,6 +362,23 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
355 return !!(dma_readl(atdma, CHSR) & atchan->mask); 362 return !!(dma_readl(atdma, CHSR) & atchan->mask);
356} 363}
357 364
365/**
366 * atc_chan_is_paused - test channel pause/resume status
367 * @atchan: channel we want to test status
368 */
369static inline int atc_chan_is_paused(struct at_dma_chan *atchan)
370{
371 return test_bit(ATC_IS_PAUSED, &atchan->status);
372}
373
374/**
375 * atc_chan_is_cyclic - test if given channel has cyclic property set
376 * @atchan: channel we want to test status
377 */
378static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan)
379{
380 return test_bit(ATC_IS_CYCLIC, &atchan->status);
381}
358 382
359/** 383/**
360 * set_desc_eol - set end-of-link to descriptor so it will end transfer 384 * set_desc_eol - set end-of-link to descriptor so it will end transfer
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 765f5ff22304..eb1d8641cf5c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -10,6 +10,7 @@
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/dma-mapping.h> 11#include <linux/dma-mapping.h>
12#include <linux/dmaengine.h> 12#include <linux/dmaengine.h>
13#include <linux/freezer.h>
13#include <linux/init.h> 14#include <linux/init.h>
14#include <linux/kthread.h> 15#include <linux/kthread.h>
15#include <linux/module.h> 16#include <linux/module.h>
@@ -251,6 +252,7 @@ static int dmatest_func(void *data)
251 int i; 252 int i;
252 253
253 thread_name = current->comm; 254 thread_name = current->comm;
255 set_freezable_with_signal();
254 256
255 ret = -ENOMEM; 257 ret = -ENOMEM;
256 258
@@ -305,7 +307,8 @@ static int dmatest_func(void *data)
305 dma_addr_t dma_srcs[src_cnt]; 307 dma_addr_t dma_srcs[src_cnt];
306 dma_addr_t dma_dsts[dst_cnt]; 308 dma_addr_t dma_dsts[dst_cnt];
307 struct completion cmp; 309 struct completion cmp;
308 unsigned long tmo = msecs_to_jiffies(timeout); 310 unsigned long start, tmo, end = 0 /* compiler... */;
311 bool reload = true;
309 u8 align = 0; 312 u8 align = 0;
310 313
311 total_tests++; 314 total_tests++;
@@ -404,7 +407,17 @@ static int dmatest_func(void *data)
404 } 407 }
405 dma_async_issue_pending(chan); 408 dma_async_issue_pending(chan);
406 409
407 tmo = wait_for_completion_timeout(&cmp, tmo); 410 do {
411 start = jiffies;
412 if (reload)
413 end = start + msecs_to_jiffies(timeout);
414 else if (end <= start)
415 end = start + 1;
416 tmo = wait_for_completion_interruptible_timeout(&cmp,
417 end - start);
418 reload = try_to_freeze();
419 } while (tmo == -ERESTARTSYS);
420
408 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 421 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
409 422
410 if (tmo == 0) { 423 if (tmo == 0) {
@@ -477,6 +490,8 @@ err_srcs:
477 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", 490 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
478 thread_name, total_tests, failed_tests, ret); 491 thread_name, total_tests, failed_tests, ret);
479 492
493 /* terminate all transfers on specified channels */
494 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
480 if (iterations > 0) 495 if (iterations > 0)
481 while (!kthread_should_stop()) { 496 while (!kthread_should_stop()) {
482 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); 497 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
@@ -499,6 +514,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
499 list_del(&thread->node); 514 list_del(&thread->node);
500 kfree(thread); 515 kfree(thread);
501 } 516 }
517
518 /* terminate all transfers on specified channels */
519 dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);
520
502 kfree(dtc); 521 kfree(dtc);
503} 522}
504 523
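The dmatest change replaces a single wait_for_completion_timeout() with a retry loop because a freezable thread's sleep is interrupted by the freezer; after thawing, the wait must be restarted with a recomputed timeout. A standalone sketch of that idiom:

#include <linux/completion.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>

/* Wait for @done up to @timeout_ms, surviving freeze/thaw cycles. */
static long wait_freezable(struct completion *done, unsigned int timeout_ms)
{
	unsigned long start, end = 0;
	bool reload = true;
	long tmo;

	do {
		start = jiffies;
		if (reload)		/* restart the full timeout after a thaw */
			end = start + msecs_to_jiffies(timeout_ms);
		else if (end <= start)	/* never pass a zero timeout */
			end = start + 1;
		tmo = wait_for_completion_interruptible_timeout(done,
								end - start);
		reload = try_to_freeze();
	} while (tmo == -ERESTARTSYS);

	return tmo;	/* 0 on timeout, >0 = jiffies left on completion */
}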
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 4d180ca9a1d8..9bfd6d360718 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1407,12 +1407,11 @@ static int __init dw_probe(struct platform_device *pdev)
1407 dw->all_chan_mask = (1 << pdata->nr_channels) - 1; 1407 dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
1408 1408
1409 INIT_LIST_HEAD(&dw->dma.channels); 1409 INIT_LIST_HEAD(&dw->dma.channels);
1410 for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) { 1410 for (i = 0; i < pdata->nr_channels; i++) {
1411 struct dw_dma_chan *dwc = &dw->chan[i]; 1411 struct dw_dma_chan *dwc = &dw->chan[i];
1412 1412
1413 dwc->chan.device = &dw->dma; 1413 dwc->chan.device = &dw->dma;
1414 dwc->chan.cookie = dwc->completed = 1; 1414 dwc->chan.cookie = dwc->completed = 1;
1415 dwc->chan.chan_id = i;
1416 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) 1415 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1417 list_add_tail(&dwc->chan.device_node, 1416 list_add_tail(&dwc->chan.device_node,
1418 &dw->dma.channels); 1417 &dw->dma.channels);
@@ -1468,7 +1467,7 @@ static int __init dw_probe(struct platform_device *pdev)
1468 dma_writel(dw, CFG, DW_CFG_DMA_EN); 1467 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1469 1468
1470 printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", 1469 printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
1471 dev_name(&pdev->dev), dw->dma.chancnt); 1470 dev_name(&pdev->dev), pdata->nr_channels);
1472 1471
1473 dma_async_device_register(&dw->dma); 1472 dma_async_device_register(&dw->dma);
1474 1473
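Several drivers in this series (dw_dmac, at_hdmac, intel_mid_dma, mpc512x, pch_dma, pl330) stop writing chan->chan_id and device->chancnt by hand. The dmaengine core makes this redundant: dma_async_device_register() walks device->channels, numbering each channel and setting chancnt itself. A minimal sketch of the registration that remains, with made-up foo_* types:

#include <linux/dmaengine.h>
#include <linux/list.h>

static int foo_register_channels(struct foo_dma *foo, int nr_channels)
{
	int i;

	INIT_LIST_HEAD(&foo->dma.channels);
	for (i = 0; i < nr_channels; i++) {
		foo->chan[i].chan.device = &foo->dma;
		list_add_tail(&foo->chan[i].chan.device_node,
			      &foo->dma.channels);
	}
	/* the core assigns chan_id and chancnt during registration */
	return dma_async_device_register(&foo->dma);
}

This is also why the probe message above switches from dw->dma.chancnt to pdata->nr_channels: chancnt is no longer populated before registration.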
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 5d7a49bd7c26..b47e2b803faf 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -22,6 +22,7 @@
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/dmaengine.h> 24#include <linux/dmaengine.h>
25#include <linux/module.h>
25#include <linux/platform_device.h> 26#include <linux/platform_device.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
27 28
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index d99f71c356b5..d746899f36e1 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -14,6 +14,7 @@
14 * http://www.gnu.org/copyleft/gpl.html 14 * http://www.gnu.org/copyleft/gpl.html
15 */ 15 */
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/module.h>
17#include <linux/types.h> 18#include <linux/types.h>
18#include <linux/mm.h> 19#include <linux/mm.h>
19#include <linux/interrupt.h> 20#include <linux/interrupt.h>
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 7bd7e98548cd..eab1fe71259e 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/module.h>
21#include <linux/types.h> 22#include <linux/types.h>
22#include <linux/mm.h> 23#include <linux/mm.h>
23#include <linux/interrupt.h> 24#include <linux/interrupt.h>
@@ -318,6 +319,7 @@ struct sdma_engine {
318 dma_addr_t context_phys; 319 dma_addr_t context_phys;
319 struct dma_device dma_device; 320 struct dma_device dma_device;
320 struct clk *clk; 321 struct clk *clk;
322 struct mutex channel_0_lock;
321 struct sdma_script_start_addrs *script_addrs; 323 struct sdma_script_start_addrs *script_addrs;
322}; 324};
323 325
@@ -415,11 +417,15 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
415 dma_addr_t buf_phys; 417 dma_addr_t buf_phys;
416 int ret; 418 int ret;
417 419
420 mutex_lock(&sdma->channel_0_lock);
421
418 buf_virt = dma_alloc_coherent(NULL, 422 buf_virt = dma_alloc_coherent(NULL,
419 size, 423 size,
420 &buf_phys, GFP_KERNEL); 424 &buf_phys, GFP_KERNEL);
421 if (!buf_virt) 425 if (!buf_virt) {
422 return -ENOMEM; 426 ret = -ENOMEM;
427 goto err_out;
428 }
423 429
424 bd0->mode.command = C0_SETPM; 430 bd0->mode.command = C0_SETPM;
425 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; 431 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
@@ -433,6 +439,9 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
433 439
434 dma_free_coherent(NULL, size, buf_virt, buf_phys); 440 dma_free_coherent(NULL, size, buf_virt, buf_phys);
435 441
442err_out:
443 mutex_unlock(&sdma->channel_0_lock);
444
436 return ret; 445 return ret;
437} 446}
438 447
@@ -656,6 +665,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
656 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0); 665 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
657 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1); 666 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
658 667
668 mutex_lock(&sdma->channel_0_lock);
669
659 memset(context, 0, sizeof(*context)); 670 memset(context, 0, sizeof(*context));
660 context->channel_state.pc = load_address; 671 context->channel_state.pc = load_address;
661 672
@@ -676,6 +687,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
676 687
677 ret = sdma_run_channel(&sdma->channel[0]); 688 ret = sdma_run_channel(&sdma->channel[0]);
678 689
690 mutex_unlock(&sdma->channel_0_lock);
691
679 return ret; 692 return ret;
680} 693}
681 694
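On i.MX SDMA, channel 0 is the control channel through which scripts and context data for every other channel are loaded, so two concurrent loads would race on the same channel-0 buffer descriptor. The new channel_0_lock simply serializes all users of that shared resource. The pattern, reduced to a sketch with placeholder helpers:

#include <linux/mutex.h>

static DEFINE_MUTEX(channel_0_lock);	/* guards the shared control channel */

static int load_via_channel0(struct foo_engine *engine, void *payload)
{
	int ret;

	mutex_lock(&channel_0_lock);
	ret = prepare_channel0_bd(engine, payload);	/* placeholder */
	if (!ret)
		ret = run_channel0(engine);		/* placeholder */
	mutex_unlock(&channel_0_lock);

	return ret;
}

A mutex (rather than a spinlock) fits here because the load path waits for the channel-0 transfer to finish and allocates coherent memory, both of which can sleep.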
@@ -1131,18 +1144,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
1131 saddr_arr[i] = addr_arr[i]; 1144 saddr_arr[i] = addr_arr[i];
1132} 1145}
1133 1146
1134static int __init sdma_get_firmware(struct sdma_engine *sdma, 1147static void sdma_load_firmware(const struct firmware *fw, void *context)
1135 const char *fw_name)
1136{ 1148{
1137 const struct firmware *fw; 1149 struct sdma_engine *sdma = context;
1138 const struct sdma_firmware_header *header; 1150 const struct sdma_firmware_header *header;
1139 int ret;
1140 const struct sdma_script_start_addrs *addr; 1151 const struct sdma_script_start_addrs *addr;
1141 unsigned short *ram_code; 1152 unsigned short *ram_code;
1142 1153
1143 ret = request_firmware(&fw, fw_name, sdma->dev); 1154 if (!fw) {
1144 if (ret) 1155 dev_err(sdma->dev, "firmware not found\n");
1145 return ret; 1156 return;
1157 }
1146 1158
1147 if (fw->size < sizeof(*header)) 1159 if (fw->size < sizeof(*header))
1148 goto err_firmware; 1160 goto err_firmware;
@@ -1172,6 +1184,16 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma,
1172 1184
1173err_firmware: 1185err_firmware:
1174 release_firmware(fw); 1186 release_firmware(fw);
1187}
1188
1189static int __init sdma_get_firmware(struct sdma_engine *sdma,
1190 const char *fw_name)
1191{
1192 int ret;
1193
1194 ret = request_firmware_nowait(THIS_MODULE,
1195 FW_ACTION_HOTPLUG, fw_name, sdma->dev,
1196 GFP_KERNEL, sdma, sdma_load_firmware);
1175 1197
1176 return ret; 1198 return ret;
1177} 1199}
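The switch to request_firmware_nowait() removes a probe-time stall: a synchronous request_firmware() can block or fail when the driver probes before the filesystem holding the firmware is mounted. The asynchronous form returns at once and invokes a callback later. Note the callback contract, sketched here with illustrative foo_* names: fw is NULL on failure, and the callback is responsible for releasing it.

#include <linux/firmware.h>
#include <linux/module.h>

static void foo_fw_loaded(const struct firmware *fw, void *context)
{
	struct foo_engine *engine = context;

	if (!fw) {
		dev_err(engine->dev, "firmware not found\n");
		return;
	}
	foo_parse_and_upload(engine, fw->data, fw->size);	/* placeholder */
	release_firmware(fw);	/* the callback owns the firmware object */
}

static int foo_request_firmware(struct foo_engine *engine, const char *name)
{
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				       name, engine->dev, GFP_KERNEL,
				       engine, foo_fw_loaded);
}

One consequence is visible further down in sdma_probe(): the script address table is pre-filled with -EINVAL markers, since transfers may now be requested before the firmware callback has run.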
@@ -1269,11 +1291,14 @@ static int __init sdma_probe(struct platform_device *pdev)
1269 struct sdma_platform_data *pdata = pdev->dev.platform_data; 1291 struct sdma_platform_data *pdata = pdev->dev.platform_data;
1270 int i; 1292 int i;
1271 struct sdma_engine *sdma; 1293 struct sdma_engine *sdma;
1294 s32 *saddr_arr;
1272 1295
1273 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); 1296 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
1274 if (!sdma) 1297 if (!sdma)
1275 return -ENOMEM; 1298 return -ENOMEM;
1276 1299
1300 mutex_init(&sdma->channel_0_lock);
1301
1277 sdma->dev = &pdev->dev; 1302 sdma->dev = &pdev->dev;
1278 1303
1279 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1304 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1310,6 +1335,11 @@ static int __init sdma_probe(struct platform_device *pdev)
1310 goto err_alloc; 1335 goto err_alloc;
1311 } 1336 }
1312 1337
1338 /* initially no scripts available */
1339 saddr_arr = (s32 *)sdma->script_addrs;
1340 for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
1341 saddr_arr[i] = -EINVAL;
1342
1313 if (of_id) 1343 if (of_id)
1314 pdev->id_entry = of_id->data; 1344 pdev->id_entry = of_id->data;
1315 sdma->devtype = pdev->id_entry->driver_data; 1345 sdma->devtype = pdev->id_entry->driver_data;
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 8a3fdd87db97..9e96c43a846a 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -115,16 +115,15 @@ DMAC1 interrupt Functions*/
115 115
116/** 116/**
117 * dmac1_mask_periphral_intr - mask the peripheral interrupt 117 * dmac1_mask_periphral_intr - mask the peripheral interrupt
118 * @midc: dma channel for which masking is required 118 * @mid: dma device for which masking is required
119 * 119 *
120 * Masks the DMA peripheral interrupt 120 * Masks the DMA peripheral interrupt
121 * this is valid for DMAC1 family controllers only 121 * this is valid for DMAC1 family controllers only
122 * This controller should have peripheral mask registers already mapped 122 * This controller should have peripheral mask registers already mapped
123 */ 123 */
124static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc) 124static void dmac1_mask_periphral_intr(struct middma_device *mid)
125{ 125{
126 u32 pimr; 126 u32 pimr;
127 struct middma_device *mid = to_middma_device(midc->chan.device);
128 127
129 if (mid->pimr_mask) { 128 if (mid->pimr_mask) {
130 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); 129 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
@@ -184,7 +183,6 @@ static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
184static void disable_dma_interrupt(struct intel_mid_dma_chan *midc) 183static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
185{ 184{
186 /*Check LPE PISR, make sure fwd is disabled*/ 185 /*Check LPE PISR, make sure fwd is disabled*/
187 dmac1_mask_periphral_intr(midc);
188 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK); 186 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
189 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); 187 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
190 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); 188 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
@@ -1114,7 +1112,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
1114 1112
1115 midch->chan.device = &dma->common; 1113 midch->chan.device = &dma->common;
1116 midch->chan.cookie = 1; 1114 midch->chan.cookie = 1;
1117 midch->chan.chan_id = i;
1118 midch->ch_id = dma->chan_base + i; 1115 midch->ch_id = dma->chan_base + i;
1119 pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); 1116 pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
1120 1117
@@ -1150,7 +1147,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
1150 dma_cap_set(DMA_SLAVE, dma->common.cap_mask); 1147 dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
1151 dma_cap_set(DMA_PRIVATE, dma->common.cap_mask); 1148 dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
1152 dma->common.dev = &pdev->dev; 1149 dma->common.dev = &pdev->dev;
1153 dma->common.chancnt = dma->max_chan;
1154 1150
1155 dma->common.device_alloc_chan_resources = 1151 dma->common.device_alloc_chan_resources =
1156 intel_mid_dma_alloc_chan_resources; 1152 intel_mid_dma_alloc_chan_resources;
@@ -1350,6 +1346,7 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
1350 if (device->ch[i].in_use) 1346 if (device->ch[i].in_use)
1351 return -EAGAIN; 1347 return -EAGAIN;
1352 } 1348 }
1349 dmac1_mask_periphral_intr(device);
1353 device->state = SUSPENDED; 1350 device->state = SUSPENDED;
1354 pci_save_state(pci); 1351 pci_save_state(pci);
1355 pci_disable_device(pci); 1352 pci_disable_device(pci);
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index b9bae94f2015..8ba4edc6185e 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -741,7 +741,6 @@ static int __devinit mpc_dma_probe(struct platform_device *op)
741 mchan = &mdma->channels[i]; 741 mchan = &mdma->channels[i];
742 742
743 mchan->chan.device = dma; 743 mchan->chan.device = dma;
744 mchan->chan.chan_id = i;
745 mchan->chan.cookie = 1; 744 mchan->chan.cookie = 1;
746 mchan->completed_cookie = mchan->chan.cookie; 745 mchan->completed_cookie = mchan->chan.cookie;
747 746
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index be641cbd36fc..b4588bdd98bb 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -130,6 +130,23 @@ struct mxs_dma_engine {
130 struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; 130 struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
131}; 131};
132 132
133static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
134{
135 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
136 int chan_id = mxs_chan->chan.chan_id;
137 int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
138
139 /* enable apbh channel clock */
140 if (dma_is_apbh()) {
141 if (apbh_is_old())
142 writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
143 mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
144 else
145 writel(1 << chan_id,
146 mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
147 }
148}
149
133static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) 150static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
134{ 151{
135 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 152 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -148,38 +165,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
148 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 165 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
149 int chan_id = mxs_chan->chan.chan_id; 166 int chan_id = mxs_chan->chan.chan_id;
150 167
168 /* clkgate needs to be enabled before writing other registers */
169 mxs_dma_clkgate(mxs_chan, 1);
170
151 /* set cmd_addr up */ 171 /* set cmd_addr up */
152 writel(mxs_chan->ccw_phys, 172 writel(mxs_chan->ccw_phys,
153 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); 173 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
154 174
155 /* enable apbh channel clock */
156 if (dma_is_apbh()) {
157 if (apbh_is_old())
158 writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
159 mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
160 else
161 writel(1 << chan_id,
162 mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
163 }
164
165 /* write 1 to SEMA to kick off the channel */ 175 /* write 1 to SEMA to kick off the channel */
166 writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id)); 176 writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
167} 177}
168 178
169static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) 179static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
170{ 180{
171 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
172 int chan_id = mxs_chan->chan.chan_id;
173
174 /* disable apbh channel clock */ 181 /* disable apbh channel clock */
175 if (dma_is_apbh()) { 182 mxs_dma_clkgate(mxs_chan, 0);
176 if (apbh_is_old())
177 writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
178 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
179 else
180 writel(1 << chan_id,
181 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
182 }
183 183
184 mxs_chan->status = DMA_SUCCESS; 184 mxs_chan->status = DMA_SUCCESS;
185} 185}
@@ -338,7 +338,10 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
338 if (ret) 338 if (ret)
339 goto err_clk; 339 goto err_clk;
340 340
341 /* clkgate needs to be enabled for reset to finish */
342 mxs_dma_clkgate(mxs_chan, 1);
341 mxs_dma_reset_chan(mxs_chan); 343 mxs_dma_reset_chan(mxs_chan);
344 mxs_dma_clkgate(mxs_chan, 0);
342 345
343 dma_async_tx_descriptor_init(&mxs_chan->desc, chan); 346 dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
344 mxs_chan->desc.tx_submit = mxs_dma_tx_submit; 347 mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
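The new mxs_dma_clkgate() helper collapses two open-coded copies of the gate/ungate logic by using the MXS set/clear shadow-register convention: writing a mask at offset +4 sets bits and at +8 clears them, with no read-modify-write. A generic sketch of the idiom (the offsets follow the usual MXS layout and are used here illustratively):

#include <linux/io.h>

#define MXS_SET_ADDR	0x4	/* write-1-to-set shadow */
#define MXS_CLR_ADDR	0x8	/* write-1-to-clear shadow */

/* Set or clear @mask in the register at @reg without reading it back. */
static void mxs_reg_update(void __iomem *reg, u32 mask, bool set)
{
	writel(mask, reg + (set ? MXS_SET_ADDR : MXS_CLR_ADDR));
}

Note the polarity in the driver: "enabling" the channel clock means clearing the CLKGATE bit, hence enable maps to MXS_CLR_ADDR. The helper is also wrapped around the channel reset in alloc_chan_resources, since the reset only completes with the clock ungated.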
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 1ac8d4b580b7..a6d0e3dbed07 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -60,7 +60,7 @@
60#define DMA_DESC_FOLLOW_WITHOUT_IRQ 0x2 60#define DMA_DESC_FOLLOW_WITHOUT_IRQ 0x2
61#define DMA_DESC_FOLLOW_WITH_IRQ 0x3 61#define DMA_DESC_FOLLOW_WITH_IRQ 0x3
62 62
63#define MAX_CHAN_NR 8 63#define MAX_CHAN_NR 12
64 64
65#define DMA_MASK_CTL0_MODE 0x33333333 65#define DMA_MASK_CTL0_MODE 0x33333333
66#define DMA_MASK_CTL2_MODE 0x00003333 66#define DMA_MASK_CTL2_MODE 0x00003333
@@ -872,8 +872,7 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
872 int i; 872 int i;
873 873
874 nr_channels = id->driver_data; 874 nr_channels = id->driver_data;
875 pd = kzalloc(sizeof(struct pch_dma)+ 875 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
876 sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL);
877 if (!pd) 876 if (!pd)
878 return -ENOMEM; 877 return -ENOMEM;
879 878
@@ -926,7 +925,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
926 } 925 }
927 926
928 pd->dma.dev = &pdev->dev; 927 pd->dma.dev = &pdev->dev;
929 pd->dma.chancnt = nr_channels;
930 928
931 INIT_LIST_HEAD(&pd->dma.channels); 929 INIT_LIST_HEAD(&pd->dma.channels);
932 930
@@ -935,7 +933,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
935 933
936 pd_chan->chan.device = &pd->dma; 934 pd_chan->chan.device = &pd->dma;
937 pd_chan->chan.cookie = 1; 935 pd_chan->chan.cookie = 1;
938 pd_chan->chan.chan_id = i;
939 936
940 pd_chan->membase = &regs->desc[i]; 937 pd_chan->membase = &regs->desc[i];
941 938
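The pch_dma allocation shrinks because, as the surrounding code suggests, struct pch_dma already embeds its channel array (sized by the raised MAX_CHAN_NR of 12), so the old kzalloc counted the channels twice. A sketch of that layout assumption with hypothetical foo_* types:

#include <linux/slab.h>

/* the channels are a member of the device struct, so sizeof(*pd)
 * already covers them; no second per-channel allocation is needed */
struct foo_dma {
	struct dma_device	dma;
	struct foo_dma_chan	channels[12];	/* MAX_CHAN_NR */
};

static struct foo_dma *foo_alloc(void)
{
	return kzalloc(sizeof(struct foo_dma), GFP_KERNEL);
}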
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 00eee59e8b33..571041477ab2 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -17,6 +17,8 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/amba/bus.h> 18#include <linux/amba/bus.h>
19#include <linux/amba/pl330.h> 19#include <linux/amba/pl330.h>
20#include <linux/pm_runtime.h>
21#include <linux/scatterlist.h>
20 22
21#define NR_DEFAULT_DESC 16 23#define NR_DEFAULT_DESC 16
22 24
@@ -68,6 +70,14 @@ struct dma_pl330_chan {
68 * NULL if the channel is available to be acquired. 70 * NULL if the channel is available to be acquired.
69 */ 71 */
70 void *pl330_chid; 72 void *pl330_chid;
73
74 /* For D-to-M and M-to-D channels */
75 int burst_sz; /* the peripheral fifo width */
76 int burst_len; /* the number of burst */
77 dma_addr_t fifo_addr;
78
79 /* for cyclic capability */
80 bool cyclic;
71}; 81};
72 82
73struct dma_pl330_dmac { 83struct dma_pl330_dmac {
@@ -83,6 +93,8 @@ struct dma_pl330_dmac {
83 93
84 /* Peripheral channels connected to this DMAC */ 94 /* Peripheral channels connected to this DMAC */
85 struct dma_pl330_chan *peripherals; /* keep at end */ 95 struct dma_pl330_chan *peripherals; /* keep at end */
96
97 struct clk *clk;
86}; 98};
87 99
88struct dma_pl330_desc { 100struct dma_pl330_desc {
@@ -152,6 +164,31 @@ static inline void free_desc_list(struct list_head *list)
152 spin_unlock_irqrestore(&pdmac->pool_lock, flags); 164 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
153} 165}
154 166
167static inline void handle_cyclic_desc_list(struct list_head *list)
168{
169 struct dma_pl330_desc *desc;
170 struct dma_pl330_chan *pch;
171 unsigned long flags;
172
173 if (list_empty(list))
174 return;
175
176 list_for_each_entry(desc, list, node) {
177 dma_async_tx_callback callback;
178
179 /* Change status to reload it */
180 desc->status = PREP;
181 pch = desc->pchan;
182 callback = desc->txd.callback;
183 if (callback)
184 callback(desc->txd.callback_param);
185 }
186
187 spin_lock_irqsave(&pch->lock, flags);
188 list_splice_tail_init(list, &pch->work_list);
189 spin_unlock_irqrestore(&pch->lock, flags);
190}
191
155static inline void fill_queue(struct dma_pl330_chan *pch) 192static inline void fill_queue(struct dma_pl330_chan *pch)
156{ 193{
157 struct dma_pl330_desc *desc; 194 struct dma_pl330_desc *desc;
@@ -205,7 +242,10 @@ static void pl330_tasklet(unsigned long data)
205 242
206 spin_unlock_irqrestore(&pch->lock, flags); 243 spin_unlock_irqrestore(&pch->lock, flags);
207 244
208 free_desc_list(&list); 245 if (pch->cyclic)
246 handle_cyclic_desc_list(&list);
247 else
248 free_desc_list(&list);
209} 249}
210 250
211static void dma_pl330_rqcb(void *token, enum pl330_op_err err) 251static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
@@ -236,6 +276,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
236 spin_lock_irqsave(&pch->lock, flags); 276 spin_lock_irqsave(&pch->lock, flags);
237 277
238 pch->completed = chan->cookie = 1; 278 pch->completed = chan->cookie = 1;
279 pch->cyclic = false;
239 280
240 pch->pl330_chid = pl330_request_channel(&pdmac->pif); 281 pch->pl330_chid = pl330_request_channel(&pdmac->pif);
241 if (!pch->pl330_chid) { 282 if (!pch->pl330_chid) {
@@ -253,25 +294,52 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
253static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) 294static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
254{ 295{
255 struct dma_pl330_chan *pch = to_pchan(chan); 296 struct dma_pl330_chan *pch = to_pchan(chan);
256 struct dma_pl330_desc *desc; 297 struct dma_pl330_desc *desc, *_dt;
257 unsigned long flags; 298 unsigned long flags;
299 struct dma_pl330_dmac *pdmac = pch->dmac;
300 struct dma_slave_config *slave_config;
301 LIST_HEAD(list);
258 302
259 /* Only supports DMA_TERMINATE_ALL */ 303 switch (cmd) {
260 if (cmd != DMA_TERMINATE_ALL) 304 case DMA_TERMINATE_ALL:
261 return -ENXIO; 305 spin_lock_irqsave(&pch->lock, flags);
262
263 spin_lock_irqsave(&pch->lock, flags);
264
265 /* FLUSH the PL330 Channel thread */
266 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
267 306
268 /* Mark all desc done */ 307 /* FLUSH the PL330 Channel thread */
269 list_for_each_entry(desc, &pch->work_list, node) 308 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
270 desc->status = DONE;
271 309
272 spin_unlock_irqrestore(&pch->lock, flags); 310 /* Mark all desc done */
311 list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
312 desc->status = DONE;
313 pch->completed = desc->txd.cookie;
314 list_move_tail(&desc->node, &list);
315 }
273 316
274 pl330_tasklet((unsigned long) pch); 317 list_splice_tail_init(&list, &pdmac->desc_pool);
318 spin_unlock_irqrestore(&pch->lock, flags);
319 break;
320 case DMA_SLAVE_CONFIG:
321 slave_config = (struct dma_slave_config *)arg;
322
323 if (slave_config->direction == DMA_TO_DEVICE) {
324 if (slave_config->dst_addr)
325 pch->fifo_addr = slave_config->dst_addr;
326 if (slave_config->dst_addr_width)
327 pch->burst_sz = __ffs(slave_config->dst_addr_width);
328 if (slave_config->dst_maxburst)
329 pch->burst_len = slave_config->dst_maxburst;
330 } else if (slave_config->direction == DMA_FROM_DEVICE) {
331 if (slave_config->src_addr)
332 pch->fifo_addr = slave_config->src_addr;
333 if (slave_config->src_addr_width)
334 pch->burst_sz = __ffs(slave_config->src_addr_width);
335 if (slave_config->src_maxburst)
336 pch->burst_len = slave_config->src_maxburst;
337 }
338 break;
339 default:
340 dev_err(pch->dmac->pif.dev, "Unsupported command.\n");
341 return -ENXIO;
342 }
275 343
276 return 0; 344 return 0;
277} 345}
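With DMA_SLAVE_CONFIG handled in pl330_control(), clients no longer need pl330-private FIFO fields: they describe the peripheral side per direction through the generic struct dma_slave_config. A client-side sketch for this era's API (the address and widths are invented):

#include <linux/dmaengine.h>

static int foo_setup_tx(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,	/* memory -> peripheral */
		.dst_addr	= 0x13800020,		/* example FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};

	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long)&cfg);
}

Inside the driver, __ffs() of the bus width yields the log2 burst-size encoding the PL330 expects, which is why the hunk stores __ffs(dst_addr_width) rather than the raw width.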
@@ -288,6 +356,9 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
288 pl330_release_channel(pch->pl330_chid); 356 pl330_release_channel(pch->pl330_chid);
289 pch->pl330_chid = NULL; 357 pch->pl330_chid = NULL;
290 358
359 if (pch->cyclic)
360 list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
361
291 spin_unlock_irqrestore(&pch->lock, flags); 362 spin_unlock_irqrestore(&pch->lock, flags);
292} 363}
293 364
@@ -453,7 +524,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
453 524
454 if (peri) { 525 if (peri) {
455 desc->req.rqtype = peri->rqtype; 526 desc->req.rqtype = peri->rqtype;
456 desc->req.peri = peri->peri_id; 527 desc->req.peri = pch->chan.chan_id;
457 } else { 528 } else {
458 desc->req.rqtype = MEMTOMEM; 529 desc->req.rqtype = MEMTOMEM;
459 desc->req.peri = 0; 530 desc->req.peri = 0;
@@ -524,6 +595,51 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
524 return burst_len; 595 return burst_len;
525} 596}
526 597
598static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
599 struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
600 size_t period_len, enum dma_data_direction direction)
601{
602 struct dma_pl330_desc *desc;
603 struct dma_pl330_chan *pch = to_pchan(chan);
604 dma_addr_t dst;
605 dma_addr_t src;
606
607 desc = pl330_get_desc(pch);
608 if (!desc) {
609 dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
610 __func__, __LINE__);
611 return NULL;
612 }
613
614 switch (direction) {
615 case DMA_TO_DEVICE:
616 desc->rqcfg.src_inc = 1;
617 desc->rqcfg.dst_inc = 0;
618 src = dma_addr;
619 dst = pch->fifo_addr;
620 break;
621 case DMA_FROM_DEVICE:
622 desc->rqcfg.src_inc = 0;
623 desc->rqcfg.dst_inc = 1;
624 src = pch->fifo_addr;
625 dst = dma_addr;
626 break;
627 default:
628 dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
629 __func__, __LINE__);
630 return NULL;
631 }
632
633 desc->rqcfg.brst_size = pch->burst_sz;
634 desc->rqcfg.brst_len = 1;
635
636 pch->cyclic = true;
637
638 fill_px(&desc->px, dst, src, period_len);
639
640 return &desc->txd;
641}
642
527static struct dma_async_tx_descriptor * 643static struct dma_async_tx_descriptor *
528pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, 644pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
529 dma_addr_t src, size_t len, unsigned long flags) 645 dma_addr_t src, size_t len, unsigned long flags)
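Cyclic support is what lets audio-style clients keep one buffer circulating: the same descriptor is reloaded each period (its status is set back to PREP in handle_cyclic_desc_list() above) and the callback fires once per period. Roughly how a client drives it with this era's API; the names and raw tx_submit spelling are illustrative:

#include <linux/dmaengine.h>

static void foo_period_elapsed(void *arg)
{
	/* called once per period_len worth of data */
}

static int foo_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
			    size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *desc;

	desc = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						    period_len, DMA_TO_DEVICE);
	if (!desc)
		return -EBUSY;

	desc->callback = foo_period_elapsed;
	desc->callback_param = chan;
	desc->tx_submit(desc);			/* queue the descriptor */
	dma_async_issue_pending(chan);		/* and kick the channel */

	return 0;
}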
@@ -579,7 +695,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
579 struct dma_pl330_peri *peri = chan->private; 695 struct dma_pl330_peri *peri = chan->private;
580 struct scatterlist *sg; 696 struct scatterlist *sg;
581 unsigned long flags; 697 unsigned long flags;
582 int i, burst_size; 698 int i;
583 dma_addr_t addr; 699 dma_addr_t addr;
584 700
585 if (unlikely(!pch || !sgl || !sg_len || !peri)) 701 if (unlikely(!pch || !sgl || !sg_len || !peri))
@@ -595,8 +711,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
595 return NULL; 711 return NULL;
596 } 712 }
597 713
598 addr = peri->fifo_addr; 714 addr = pch->fifo_addr;
599 burst_size = peri->burst_sz;
600 715
601 first = NULL; 716 first = NULL;
602 717
@@ -644,7 +759,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
644 sg_dma_address(sg), addr, sg_dma_len(sg)); 759 sg_dma_address(sg), addr, sg_dma_len(sg));
645 } 760 }
646 761
647 desc->rqcfg.brst_size = burst_size; 762 desc->rqcfg.brst_size = pch->burst_sz;
648 desc->rqcfg.brst_len = 1; 763 desc->rqcfg.brst_len = 1;
649 } 764 }
650 765
@@ -696,6 +811,30 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
696 goto probe_err1; 811 goto probe_err1;
697 } 812 }
698 813
814 pdmac->clk = clk_get(&adev->dev, "dma");
815 if (IS_ERR(pdmac->clk)) {
816 dev_err(&adev->dev, "Cannot get operation clock.\n");
817 ret = -EINVAL;
818 goto probe_err1;
819 }
820
821 amba_set_drvdata(adev, pdmac);
822
823#ifdef CONFIG_PM_RUNTIME
824 /* to use the runtime PM helper functions */
825 pm_runtime_enable(&adev->dev);
826
827 /* enable the power domain */
828 if (pm_runtime_get_sync(&adev->dev)) {
829 dev_err(&adev->dev, "failed to get runtime pm\n");
830 ret = -ENODEV;
831 goto probe_err1;
832 }
833#else
834 /* enable dma clk */
835 clk_enable(pdmac->clk);
836#endif
837
699 irq = adev->irq[0]; 838 irq = adev->irq[0];
700 ret = request_irq(irq, pl330_irq_handler, 0, 839 ret = request_irq(irq, pl330_irq_handler, 0,
701 dev_name(&adev->dev), pi); 840 dev_name(&adev->dev), pi);
@@ -732,6 +871,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
732 case MEMTODEV: 871 case MEMTODEV:
733 case DEVTOMEM: 872 case DEVTOMEM:
734 dma_cap_set(DMA_SLAVE, pd->cap_mask); 873 dma_cap_set(DMA_SLAVE, pd->cap_mask);
874 dma_cap_set(DMA_CYCLIC, pd->cap_mask);
735 break; 875 break;
736 default: 876 default:
737 dev_err(&adev->dev, "DEVTODEV Not Supported\n"); 877 dev_err(&adev->dev, "DEVTODEV Not Supported\n");
@@ -747,11 +887,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
747 spin_lock_init(&pch->lock); 887 spin_lock_init(&pch->lock);
748 pch->pl330_chid = NULL; 888 pch->pl330_chid = NULL;
749 pch->chan.device = pd; 889 pch->chan.device = pd;
750 pch->chan.chan_id = i;
751 pch->dmac = pdmac; 890 pch->dmac = pdmac;
752 891
753 /* Add the channel to the DMAC list */ 892 /* Add the channel to the DMAC list */
754 pd->chancnt++;
755 list_add_tail(&pch->chan.device_node, &pd->channels); 893 list_add_tail(&pch->chan.device_node, &pd->channels);
756 } 894 }
757 895
@@ -760,6 +898,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
760 pd->device_alloc_chan_resources = pl330_alloc_chan_resources; 898 pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
761 pd->device_free_chan_resources = pl330_free_chan_resources; 899 pd->device_free_chan_resources = pl330_free_chan_resources;
762 pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy; 900 pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
901 pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
763 pd->device_tx_status = pl330_tx_status; 902 pd->device_tx_status = pl330_tx_status;
764 pd->device_prep_slave_sg = pl330_prep_slave_sg; 903 pd->device_prep_slave_sg = pl330_prep_slave_sg;
765 pd->device_control = pl330_control; 904 pd->device_control = pl330_control;
@@ -771,8 +910,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
771 goto probe_err4; 910 goto probe_err4;
772 } 911 }
773 912
774 amba_set_drvdata(adev, pdmac);
775
776 dev_info(&adev->dev, 913 dev_info(&adev->dev,
777 "Loaded driver for PL330 DMAC-%d\n", adev->periphid); 914 "Loaded driver for PL330 DMAC-%d\n", adev->periphid);
778 dev_info(&adev->dev, 915 dev_info(&adev->dev,
@@ -833,6 +970,13 @@ static int __devexit pl330_remove(struct amba_device *adev)
833 res = &adev->res; 970 res = &adev->res;
834 release_mem_region(res->start, resource_size(res)); 971 release_mem_region(res->start, resource_size(res));
835 972
973#ifdef CONFIG_PM_RUNTIME
974 pm_runtime_put(&adev->dev);
975 pm_runtime_disable(&adev->dev);
976#else
977 clk_disable(pdmac->clk);
978#endif
979
836 kfree(pdmac); 980 kfree(pdmac);
837 981
838 return 0; 982 return 0;
@@ -846,10 +990,49 @@ static struct amba_id pl330_ids[] = {
846 { 0, 0 }, 990 { 0, 0 },
847}; 991};
848 992
993#ifdef CONFIG_PM_RUNTIME
994static int pl330_runtime_suspend(struct device *dev)
995{
996 struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
997
998 if (!pdmac) {
999 dev_err(dev, "failed to get dmac\n");
1000 return -ENODEV;
1001 }
1002
1003 clk_disable(pdmac->clk);
1004
1005 return 0;
1006}
1007
1008static int pl330_runtime_resume(struct device *dev)
1009{
1010 struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
1011
1012 if (!pdmac) {
1013 dev_err(dev, "failed to get dmac\n");
1014 return -ENODEV;
1015 }
1016
1017 clk_enable(pdmac->clk);
1018
1019 return 0;
1020}
1021#else
1022#define pl330_runtime_suspend NULL
1023#define pl330_runtime_resume NULL
1024#endif /* CONFIG_PM_RUNTIME */
1025
1026static const struct dev_pm_ops pl330_pm_ops = {
1027 .runtime_suspend = pl330_runtime_suspend,
1028 .runtime_resume = pl330_runtime_resume,
1029};
1030
849static struct amba_driver pl330_driver = { 1031static struct amba_driver pl330_driver = {
850 .drv = { 1032 .drv = {
851 .owner = THIS_MODULE, 1033 .owner = THIS_MODULE,
852 .name = "dma-pl330", 1034 .name = "dma-pl330",
1035 .pm = &pl330_pm_ops,
853 }, 1036 },
854 .id_table = pl330_ids, 1037 .id_table = pl330_ids,
855 .probe = pl330_probe, 1038 .probe = pl330_probe,
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 7f49235d14b9..81809c2b46ab 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -259,14 +259,23 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
259 return 0; 259 return 0;
260} 260}
261 261
262static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
263
262static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) 264static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
263{ 265{
264 struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c; 266 struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
265 struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan); 267 struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
268 struct sh_dmae_slave *param = tx->chan->private;
266 dma_async_tx_callback callback = tx->callback; 269 dma_async_tx_callback callback = tx->callback;
267 dma_cookie_t cookie; 270 dma_cookie_t cookie;
271 bool power_up;
272
273 spin_lock_irq(&sh_chan->desc_lock);
268 274
269 spin_lock_bh(&sh_chan->desc_lock); 275 if (list_empty(&sh_chan->ld_queue))
276 power_up = true;
277 else
278 power_up = false;
270 279
271 cookie = sh_chan->common.cookie; 280 cookie = sh_chan->common.cookie;
272 cookie++; 281 cookie++;
@@ -302,7 +311,38 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
302 tx->cookie, &last->async_tx, sh_chan->id, 311 tx->cookie, &last->async_tx, sh_chan->id,
303 desc->hw.sar, desc->hw.tcr, desc->hw.dar); 312 desc->hw.sar, desc->hw.tcr, desc->hw.dar);
304 313
305 spin_unlock_bh(&sh_chan->desc_lock); 314 if (power_up) {
315 sh_chan->pm_state = DMAE_PM_BUSY;
316
317 pm_runtime_get(sh_chan->dev);
318
319 spin_unlock_irq(&sh_chan->desc_lock);
320
321 pm_runtime_barrier(sh_chan->dev);
322
323 spin_lock_irq(&sh_chan->desc_lock);
324
325 /* Have we been reset while waiting? */
326 if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) {
327 dev_dbg(sh_chan->dev, "Bring up channel %d\n",
328 sh_chan->id);
329 if (param) {
330 const struct sh_dmae_slave_config *cfg =
331 param->config;
332
333 dmae_set_dmars(sh_chan, cfg->mid_rid);
334 dmae_set_chcr(sh_chan, cfg->chcr);
335 } else {
336 dmae_init(sh_chan);
337 }
338
339 if (sh_chan->pm_state == DMAE_PM_PENDING)
340 sh_chan_xfer_ld_queue(sh_chan);
341 sh_chan->pm_state = DMAE_PM_ESTABLISHED;
342 }
343 }
344
345 spin_unlock_irq(&sh_chan->desc_lock);
306 346
307 return cookie; 347 return cookie;
308} 348}
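The shdma rework ties runtime PM to transfer activity rather than channel allocation: the first descriptor entering an empty queue takes a PM reference, and the reference is dropped (in __ld_cleanup() below) when the queue drains. Stripped to a skeleton with placeholder foo_* helpers; the pm_state values mirror the DMAE_PM_* states this series adds:

#include <linux/pm_runtime.h>

static void foo_submit(struct foo_chan *fc)
{
	bool power_up;

	spin_lock_irq(&fc->lock);
	power_up = list_empty(&fc->queue);	/* first descriptor? */
	foo_queue_desc(fc);			/* placeholder */

	if (power_up) {
		fc->pm_state = FOO_PM_BUSY;
		pm_runtime_get(fc->dev);	/* async resume request */
		spin_unlock_irq(&fc->lock);
		pm_runtime_barrier(fc->dev);	/* wait until resumed */
		spin_lock_irq(&fc->lock);
		/* a reset may have torn the channel down while waiting */
		if (fc->pm_state != FOO_PM_ESTABLISHED) {
			foo_reinit_hw(fc);	/* placeholder */
			if (fc->pm_state == FOO_PM_PENDING)
				foo_start_queue(fc);	/* placeholder */
			fc->pm_state = FOO_PM_ESTABLISHED;
		}
	}
	spin_unlock_irq(&fc->lock);
}

The matching pm_runtime_put() sits in the cleanup path, guarded by list_empty() on the queue, so the controller is powered exactly while work is outstanding.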
@@ -346,8 +386,6 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
346 struct sh_dmae_slave *param = chan->private; 386 struct sh_dmae_slave *param = chan->private;
347 int ret; 387 int ret;
348 388
349 pm_runtime_get_sync(sh_chan->dev);
350
351 /* 389 /*
352 * This relies on the guarantee from dmaengine that alloc_chan_resources 390 * This relies on the guarantee from dmaengine that alloc_chan_resources
353 * never runs concurrently with itself or free_chan_resources. 391 * never runs concurrently with itself or free_chan_resources.
@@ -367,31 +405,20 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
367 } 405 }
368 406
369 param->config = cfg; 407 param->config = cfg;
370
371 dmae_set_dmars(sh_chan, cfg->mid_rid);
372 dmae_set_chcr(sh_chan, cfg->chcr);
373 } else {
374 dmae_init(sh_chan);
375 } 408 }
376 409
377 spin_lock_bh(&sh_chan->desc_lock);
378 while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { 410 while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
379 spin_unlock_bh(&sh_chan->desc_lock);
380 desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL); 411 desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
381 if (!desc) { 412 if (!desc)
382 spin_lock_bh(&sh_chan->desc_lock);
383 break; 413 break;
384 }
385 dma_async_tx_descriptor_init(&desc->async_tx, 414 dma_async_tx_descriptor_init(&desc->async_tx,
386 &sh_chan->common); 415 &sh_chan->common);
387 desc->async_tx.tx_submit = sh_dmae_tx_submit; 416 desc->async_tx.tx_submit = sh_dmae_tx_submit;
388 desc->mark = DESC_IDLE; 417 desc->mark = DESC_IDLE;
389 418
390 spin_lock_bh(&sh_chan->desc_lock);
391 list_add(&desc->node, &sh_chan->ld_free); 419 list_add(&desc->node, &sh_chan->ld_free);
392 sh_chan->descs_allocated++; 420 sh_chan->descs_allocated++;
393 } 421 }
394 spin_unlock_bh(&sh_chan->desc_lock);
395 422
396 if (!sh_chan->descs_allocated) { 423 if (!sh_chan->descs_allocated) {
397 ret = -ENOMEM; 424 ret = -ENOMEM;
@@ -405,7 +432,7 @@ edescalloc:
405 clear_bit(param->slave_id, sh_dmae_slave_used); 432 clear_bit(param->slave_id, sh_dmae_slave_used);
406etestused: 433etestused:
407efindslave: 434efindslave:
408 pm_runtime_put(sh_chan->dev); 435 chan->private = NULL;
409 return ret; 436 return ret;
410} 437}
411 438
@@ -417,7 +444,6 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
417 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 444 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
418 struct sh_desc *desc, *_desc; 445 struct sh_desc *desc, *_desc;
419 LIST_HEAD(list); 446 LIST_HEAD(list);
420 int descs = sh_chan->descs_allocated;
421 447
422 /* Protect against ISR */ 448 /* Protect against ISR */
423 spin_lock_irq(&sh_chan->desc_lock); 449 spin_lock_irq(&sh_chan->desc_lock);
@@ -437,15 +463,12 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
437 chan->private = NULL; 463 chan->private = NULL;
438 } 464 }
439 465
440 spin_lock_bh(&sh_chan->desc_lock); 466 spin_lock_irq(&sh_chan->desc_lock);
441 467
442 list_splice_init(&sh_chan->ld_free, &list); 468 list_splice_init(&sh_chan->ld_free, &list);
443 sh_chan->descs_allocated = 0; 469 sh_chan->descs_allocated = 0;
444 470
445 spin_unlock_bh(&sh_chan->desc_lock); 471 spin_unlock_irq(&sh_chan->desc_lock);
446
447 if (descs > 0)
448 pm_runtime_put(sh_chan->dev);
449 472
450 list_for_each_entry_safe(desc, _desc, &list, node) 473 list_for_each_entry_safe(desc, _desc, &list, node)
451 kfree(desc); 474 kfree(desc);
@@ -534,6 +557,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
534 struct sh_desc *first = NULL, *new = NULL /* compiler... */; 557 struct sh_desc *first = NULL, *new = NULL /* compiler... */;
535 LIST_HEAD(tx_list); 558 LIST_HEAD(tx_list);
536 int chunks = 0; 559 int chunks = 0;
560 unsigned long irq_flags;
537 int i; 561 int i;
538 562
539 if (!sg_len) 563 if (!sg_len)
@@ -544,7 +568,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
544 (SH_DMA_TCR_MAX + 1); 568 (SH_DMA_TCR_MAX + 1);
545 569
546 /* Have to lock the whole loop to protect against concurrent release */ 570 /* Have to lock the whole loop to protect against concurrent release */
547 spin_lock_bh(&sh_chan->desc_lock); 571 spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);
548 572
549 /* 573 /*
550 * Chaining: 574 * Chaining:
@@ -590,7 +614,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
590 /* Put them back on the free list, so, they don't get lost */ 614 /* Put them back on the free list, so, they don't get lost */
591 list_splice_tail(&tx_list, &sh_chan->ld_free); 615 list_splice_tail(&tx_list, &sh_chan->ld_free);
592 616
593 spin_unlock_bh(&sh_chan->desc_lock); 617 spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
594 618
595 return &first->async_tx; 619 return &first->async_tx;
596 620
@@ -599,7 +623,7 @@ err_get_desc:
599 new->mark = DESC_IDLE; 623 new->mark = DESC_IDLE;
600 list_splice(&tx_list, &sh_chan->ld_free); 624 list_splice(&tx_list, &sh_chan->ld_free);
601 625
602 spin_unlock_bh(&sh_chan->desc_lock); 626 spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
603 627
604 return NULL; 628 return NULL;
605} 629}
@@ -661,6 +685,7 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
661 unsigned long arg) 685 unsigned long arg)
662{ 686{
663 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 687 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
688 unsigned long flags;
664 689
665 /* Only supports DMA_TERMINATE_ALL */ 690 /* Only supports DMA_TERMINATE_ALL */
666 if (cmd != DMA_TERMINATE_ALL) 691 if (cmd != DMA_TERMINATE_ALL)
@@ -669,7 +694,7 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
669 if (!chan) 694 if (!chan)
670 return -EINVAL; 695 return -EINVAL;
671 696
672 spin_lock_bh(&sh_chan->desc_lock); 697 spin_lock_irqsave(&sh_chan->desc_lock, flags);
673 dmae_halt(sh_chan); 698 dmae_halt(sh_chan);
674 699
675 if (!list_empty(&sh_chan->ld_queue)) { 700 if (!list_empty(&sh_chan->ld_queue)) {
@@ -678,9 +703,8 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
678 struct sh_desc, node); 703 struct sh_desc, node);
679 desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << 704 desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
680 sh_chan->xmit_shift; 705 sh_chan->xmit_shift;
681
682 } 706 }
683 spin_unlock_bh(&sh_chan->desc_lock); 707 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
684 708
685 sh_dmae_chan_ld_cleanup(sh_chan, true); 709 sh_dmae_chan_ld_cleanup(sh_chan, true);
686 710
@@ -695,8 +719,9 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
695 dma_cookie_t cookie = 0; 719 dma_cookie_t cookie = 0;
696 dma_async_tx_callback callback = NULL; 720 dma_async_tx_callback callback = NULL;
697 void *param = NULL; 721 void *param = NULL;
722 unsigned long flags;
698 723
699 spin_lock_bh(&sh_chan->desc_lock); 724 spin_lock_irqsave(&sh_chan->desc_lock, flags);
700 list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) { 725 list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
701 struct dma_async_tx_descriptor *tx = &desc->async_tx; 726 struct dma_async_tx_descriptor *tx = &desc->async_tx;
702 727
@@ -762,7 +787,13 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
762 async_tx_test_ack(&desc->async_tx)) || all) { 787 async_tx_test_ack(&desc->async_tx)) || all) {
763 /* Remove from ld_queue list */ 788 /* Remove from ld_queue list */
764 desc->mark = DESC_IDLE; 789 desc->mark = DESC_IDLE;
790
765 list_move(&desc->node, &sh_chan->ld_free); 791 list_move(&desc->node, &sh_chan->ld_free);
792
793 if (list_empty(&sh_chan->ld_queue)) {
794 dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
795 pm_runtime_put(sh_chan->dev);
796 }
766 } 797 }
767 } 798 }
768 799
@@ -773,7 +804,7 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
773 */ 804 */
774 sh_chan->completed_cookie = sh_chan->common.cookie; 805 sh_chan->completed_cookie = sh_chan->common.cookie;
775 806
776 spin_unlock_bh(&sh_chan->desc_lock); 807 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
777 808
778 if (callback) 809 if (callback)
779 callback(param); 810 callback(param);
@@ -792,14 +823,14 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
792 ; 823 ;
793} 824}
794 825
826/* Called under spin_lock_irq(&sh_chan->desc_lock) */
795static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) 827static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
796{ 828{
797 struct sh_desc *desc; 829 struct sh_desc *desc;
798 830
799 spin_lock_bh(&sh_chan->desc_lock);
800 /* DMA work check */ 831 /* DMA work check */
801 if (dmae_is_busy(sh_chan)) 832 if (dmae_is_busy(sh_chan))
802 goto sh_chan_xfer_ld_queue_end; 833 return;
803 834
804 /* Find the first not transferred descriptor */ 835 /* Find the first not transferred descriptor */
805 list_for_each_entry(desc, &sh_chan->ld_queue, node) 836 list_for_each_entry(desc, &sh_chan->ld_queue, node)
@@ -812,15 +843,18 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
812 dmae_start(sh_chan); 843 dmae_start(sh_chan);
813 break; 844 break;
814 } 845 }
815
816sh_chan_xfer_ld_queue_end:
817 spin_unlock_bh(&sh_chan->desc_lock);
818} 846}
819 847
820static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan) 848static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
821{ 849{
822 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 850 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
823 sh_chan_xfer_ld_queue(sh_chan); 851
852 spin_lock_irq(&sh_chan->desc_lock);
853 if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
854 sh_chan_xfer_ld_queue(sh_chan);
855 else
856 sh_chan->pm_state = DMAE_PM_PENDING;
857 spin_unlock_irq(&sh_chan->desc_lock);
824} 858}
825 859
826static enum dma_status sh_dmae_tx_status(struct dma_chan *chan, 860static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
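
Two related changes meet in the hunks above: sh_chan_xfer_ld_queue() no longer takes desc_lock itself (its callers now hold it), and issue_pending only starts the queue when pm_state says the device is powered, otherwise deferring to the resume path via DMAE_PM_PENDING. A sketch of the caller-holds-the-lock convention; lockdep_assert_held() is a suggested hardening, not something the driver itself adds, and the types are hypothetical:

	#include <linux/lockdep.h>
	#include <linux/spinlock.h>

	struct my_chan {
		spinlock_t lock;
	};

	/* must be entered with c->lock held, like sh_chan_xfer_ld_queue() */
	static void start_queue_locked(struct my_chan *c)
	{
		lockdep_assert_held(&c->lock);
		/* ... find the first unsubmitted descriptor, start hardware ... */
	}

	static void issue_pending(struct my_chan *c)
	{
		spin_lock_irq(&c->lock);	/* process context: IRQs known enabled */
		start_queue_locked(c);
		spin_unlock_irq(&c->lock);
	}
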
@@ -831,6 +865,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
831 dma_cookie_t last_used; 865 dma_cookie_t last_used;
832 dma_cookie_t last_complete; 866 dma_cookie_t last_complete;
833 enum dma_status status; 867 enum dma_status status;
868 unsigned long flags;
834 869
835 sh_dmae_chan_ld_cleanup(sh_chan, false); 870 sh_dmae_chan_ld_cleanup(sh_chan, false);
836 871
@@ -841,7 +876,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
841 BUG_ON(last_complete < 0); 876 BUG_ON(last_complete < 0);
842 dma_set_tx_state(txstate, last_complete, last_used, 0); 877 dma_set_tx_state(txstate, last_complete, last_used, 0);
843 878
844 spin_lock_bh(&sh_chan->desc_lock); 879 spin_lock_irqsave(&sh_chan->desc_lock, flags);
845 880
846 status = dma_async_is_complete(cookie, last_complete, last_used); 881 status = dma_async_is_complete(cookie, last_complete, last_used);
847 882
@@ -859,7 +894,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
859 } 894 }
860 } 895 }
861 896
862 spin_unlock_bh(&sh_chan->desc_lock); 897 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
863 898
864 return status; 899 return status;
865} 900}
@@ -912,6 +947,12 @@ static bool sh_dmae_reset(struct sh_dmae_device *shdev)
912 947
913 list_splice_init(&sh_chan->ld_queue, &dl); 948 list_splice_init(&sh_chan->ld_queue, &dl);
914 949
950 if (!list_empty(&dl)) {
951 dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
952 pm_runtime_put(sh_chan->dev);
953 }
954 sh_chan->pm_state = DMAE_PM_ESTABLISHED;
955
915 spin_unlock(&sh_chan->desc_lock); 956 spin_unlock(&sh_chan->desc_lock);
916 957
917 /* Complete all */ 958 /* Complete all */
@@ -952,7 +993,7 @@ static void dmae_do_tasklet(unsigned long data)
952 u32 sar_buf = sh_dmae_readl(sh_chan, SAR); 993 u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
953 u32 dar_buf = sh_dmae_readl(sh_chan, DAR); 994 u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
954 995
955 spin_lock(&sh_chan->desc_lock); 996 spin_lock_irq(&sh_chan->desc_lock);
956 list_for_each_entry(desc, &sh_chan->ld_queue, node) { 997 list_for_each_entry(desc, &sh_chan->ld_queue, node) {
957 if (desc->mark == DESC_SUBMITTED && 998 if (desc->mark == DESC_SUBMITTED &&
958 ((desc->direction == DMA_FROM_DEVICE && 999 ((desc->direction == DMA_FROM_DEVICE &&
@@ -965,10 +1006,10 @@ static void dmae_do_tasklet(unsigned long data)
965 break; 1006 break;
966 } 1007 }
967 } 1008 }
968 spin_unlock(&sh_chan->desc_lock);
969
970 /* Next desc */ 1009 /* Next desc */
971 sh_chan_xfer_ld_queue(sh_chan); 1010 sh_chan_xfer_ld_queue(sh_chan);
1011 spin_unlock_irq(&sh_chan->desc_lock);
1012
972 sh_dmae_chan_ld_cleanup(sh_chan, false); 1013 sh_dmae_chan_ld_cleanup(sh_chan, false);
973} 1014}
974 1015
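
The tasklet above switches to spin_lock_irq() rather than irqsave: a tasklet runs in softirq context where hard IRQs are known to be enabled, so there are no flags worth saving, yet disabling IRQs is still required now that the IRQ handler shares desc_lock. Starting the next descriptor also moves inside the critical section, so the handler can never observe a half-updated queue. An era-appropriate sketch with hypothetical names:

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	struct my_chan {
		spinlock_t lock;
		/* ... descriptor lists ... */
	};

	static void my_tasklet(unsigned long data)
	{
		struct my_chan *c = (struct my_chan *)data;

		spin_lock_irq(&c->lock);	/* IRQs enabled on entry, no flags */
		/* ... mark the finished descriptor complete, then start the
		 * next queued one while still holding the lock ... */
		spin_unlock_irq(&c->lock);
	}
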
@@ -1036,7 +1077,9 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
1036 return -ENOMEM; 1077 return -ENOMEM;
1037 } 1078 }
1038 1079
1039 /* copy struct dma_device */ 1080 new_sh_chan->pm_state = DMAE_PM_ESTABLISHED;
1081
1082 /* reference struct dma_device */
1040 new_sh_chan->common.device = &shdev->common; 1083 new_sh_chan->common.device = &shdev->common;
1041 1084
1042 new_sh_chan->dev = shdev->common.dev; 1085 new_sh_chan->dev = shdev->common.dev;
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index dc56576f9fdb..2b55a276dc5b 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -23,6 +23,12 @@
23 23
24struct device; 24struct device;
25 25
26enum dmae_pm_state {
27 DMAE_PM_ESTABLISHED,
28 DMAE_PM_BUSY,
29 DMAE_PM_PENDING,
30};
31
26struct sh_dmae_chan { 32struct sh_dmae_chan {
27 dma_cookie_t completed_cookie; /* The maximum cookie completed */ 33 dma_cookie_t completed_cookie; /* The maximum cookie completed */
28 spinlock_t desc_lock; /* Descriptor operation lock */ 34 spinlock_t desc_lock; /* Descriptor operation lock */
@@ -38,6 +44,7 @@ struct sh_dmae_chan {
38 u32 __iomem *base; 44 u32 __iomem *base;
39 char dev_id[16]; /* unique name per DMAC of channel */ 45 char dev_id[16]; /* unique name per DMAC of channel */
40 int pm_error; 46 int pm_error;
47 enum dmae_pm_state pm_state;
41}; 48};
42 49
43struct sh_dmae_device { 50struct sh_dmae_device {
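
How the three states are used is only partly visible in this diff; the annotated reading below is inferred from the shdma.c hunks above, and since DMAE_PM_BUSY does not appear in them, its annotation is an assumption:

	enum dmae_pm_state {
		DMAE_PM_ESTABLISHED,	/* runtime-resumed: issue_pending()
					 * may start transfers directly */
		DMAE_PM_BUSY,		/* presumably: runtime resume in
					 * flight (not used in these hunks) */
		DMAE_PM_PENDING,	/* issue_pending() ran before resume
					 * completed; the resume path must
					 * kick the queue itself */
	};
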
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index f69f90a61873..a4a398f2ef61 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -753,7 +753,7 @@ static int __devinit td_probe(struct platform_device *pdev)
753 753
754 INIT_LIST_HEAD(&td->dma.channels); 754 INIT_LIST_HEAD(&td->dma.channels);
755 755
756 for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) { 756 for (i = 0; i < pdata->nr_channels; i++) {
757 struct timb_dma_chan *td_chan = &td->channels[i]; 757 struct timb_dma_chan *td_chan = &td->channels[i];
758 struct timb_dma_platform_data_channel *pchan = 758 struct timb_dma_platform_data_channel *pchan =
759 pdata->channels + i; 759 pdata->channels + i;
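
The dropped chancnt++ (and, in the next hunk, chan_id) initialization is safe to remove because dma_async_device_register() walks dma_device->channels and assigns chan_id and chancnt itself; the driver only needs to link each channel into that list. An abridged sketch of the registration side of td_probe(), error handling omitted:

	#include <linux/dmaengine.h>

	for (i = 0; i < pdata->nr_channels; i++) {
		struct dma_chan *chan = &td->channels[i].chan;

		chan->device = &td->dma;	/* back-pointer to the DMAC */
		chan->cookie = 1;
		list_add_tail(&chan->device_node, &td->dma.channels);
	}
	err = dma_async_device_register(&td->dma);	/* sets chan_id, chancnt */
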
@@ -762,12 +762,11 @@ static int __devinit td_probe(struct platform_device *pdev)
762 if ((i % 2) == pchan->rx) { 762 if ((i % 2) == pchan->rx) {
763 dev_err(&pdev->dev, "Wrong channel configuration\n"); 763 dev_err(&pdev->dev, "Wrong channel configuration\n");
764 err = -EINVAL; 764 err = -EINVAL;
765 goto err_tasklet_kill; 765 goto err_free_irq;
766 } 766 }
767 767
768 td_chan->chan.device = &td->dma; 768 td_chan->chan.device = &td->dma;
769 td_chan->chan.cookie = 1; 769 td_chan->chan.cookie = 1;
770 td_chan->chan.chan_id = i;
771 spin_lock_init(&td_chan->lock); 770 spin_lock_init(&td_chan->lock);
772 INIT_LIST_HEAD(&td_chan->active_list); 771 INIT_LIST_HEAD(&td_chan->active_list);
773 INIT_LIST_HEAD(&td_chan->queue); 772 INIT_LIST_HEAD(&td_chan->queue);