author     Kukjin Kim <kgene.kim@samsung.com>  2011-10-04 05:57:38 -0400
committer  Kukjin Kim <kgene.kim@samsung.com>  2011-10-04 05:57:38 -0400
commit     59ca37f74a6e9d3e9367359f2fcbb286df7d9748 (patch)
tree       cbc0254ef952f10e99e384b56ccc7b81389e335e /drivers
parent     7d84b3e93757fe871d57b43c422b81cc8b94057a (diff)
parent     c9312209aa9fdef05b03d63bbead63bb720fd133 (diff)
Merge branch 'next/topic-dma-samsung' into next-samsung-devel
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/Kconfig          |   3
-rw-r--r--  drivers/dma/amba-pl08x.c     | 455
-rw-r--r--  drivers/dma/at_hdmac.c       | 159
-rw-r--r--  drivers/dma/at_hdmac_regs.h  |  24
-rw-r--r--  drivers/dma/dmatest.c        |  23
-rw-r--r--  drivers/dma/imx-sdma.c       |  47
-rw-r--r--  drivers/dma/mxs-dma.c        |  45
-rw-r--r--  drivers/dma/pl330.c          | 229
-rw-r--r--  drivers/mmc/host/s3cmci.c    |   6
-rw-r--r--  drivers/spi/spi-s3c64xx.c    | 175
10 files changed, 733 insertions(+), 433 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 2e3b3d38c465..ab8f469f5cf8 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -193,7 +193,8 @@ config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
193config PL330_DMA 193config PL330_DMA
194 tristate "DMA API Driver for PL330" 194 tristate "DMA API Driver for PL330"
195 select DMA_ENGINE 195 select DMA_ENGINE
196 depends on PL330 196 depends on ARM_AMBA
197 select PL330
197 help 198 help
198 Select if your platform has one or more PL330 DMACs. 199 Select if your platform has one or more PL330 DMACs.
199 You need to provide platform specific settings via 200 You need to provide platform specific settings via
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index be21e3f138a8..cd8df7f5b5c8 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -66,32 +66,29 @@
66 * after the final transfer signalled by LBREQ or LSREQ. The DMAC 66 * after the final transfer signalled by LBREQ or LSREQ. The DMAC
67 * will then move to the next LLI entry. 67 * will then move to the next LLI entry.
68 * 68 *
69 * Only the former works sanely with scatter lists, so we only implement
70 * the DMAC flow control method. However, peripherals which use the LBREQ
71 * and LSREQ signals (eg, MMCI) are unable to use this mode, which through
72 * these hardware restrictions prevents them from using scatter DMA.
73 *
74 * Global TODO: 69 * Global TODO:
75 * - Break out common code from arch/arm/mach-s3c64xx and share 70 * - Break out common code from arch/arm/mach-s3c64xx and share
76 */ 71 */
77#include <linux/device.h>
78#include <linux/init.h>
79#include <linux/module.h>
80#include <linux/interrupt.h>
81#include <linux/slab.h>
82#include <linux/delay.h>
83#include <linux/dma-mapping.h>
84#include <linux/dmapool.h>
85#include <linux/dmaengine.h>
86#include <linux/amba/bus.h> 72#include <linux/amba/bus.h>
87#include <linux/amba/pl08x.h> 73#include <linux/amba/pl08x.h>
88#include <linux/debugfs.h> 74#include <linux/debugfs.h>
75#include <linux/delay.h>
76#include <linux/device.h>
77#include <linux/dmaengine.h>
78#include <linux/dmapool.h>
79#include <linux/dma-mapping.h>
80#include <linux/init.h>
81#include <linux/interrupt.h>
82#include <linux/module.h>
83#include <linux/pm_runtime.h>
89#include <linux/seq_file.h> 84#include <linux/seq_file.h>
90 85#include <linux/slab.h>
91#include <asm/hardware/pl080.h> 86#include <asm/hardware/pl080.h>
92 87
93#define DRIVER_NAME "pl08xdmac" 88#define DRIVER_NAME "pl08xdmac"
94 89
90static struct amba_driver pl08x_amba_driver;
91
95/** 92/**
96 * struct vendor_data - vendor-specific config parameters for PL08x derivatives 93 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
97 * @channels: the number of channels available in this variant 94 * @channels: the number of channels available in this variant
@@ -126,7 +123,8 @@ struct pl08x_lli {
126 * @phy_chans: array of data for the physical channels 123 * @phy_chans: array of data for the physical channels
127 * @pool: a pool for the LLI descriptors 124 * @pool: a pool for the LLI descriptors
128 * @pool_ctr: counter of LLIs in the pool 125 * @pool_ctr: counter of LLIs in the pool
129 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches 126 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
127 * fetches
130 * @mem_buses: set to indicate memory transfers on AHB2. 128 * @mem_buses: set to indicate memory transfers on AHB2.
131 * @lock: a spinlock for this struct 129 * @lock: a spinlock for this struct
132 */ 130 */
@@ -149,14 +147,6 @@ struct pl08x_driver_data {
149 * PL08X specific defines 147 * PL08X specific defines
150 */ 148 */
151 149
152/*
153 * Memory boundaries: the manual for PL08x says that the controller
154 * cannot read past a 1KiB boundary, so these defines are used to
155 * create transfer LLIs that do not cross such boundaries.
156 */
157#define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */
158#define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT)
159
160/* Size (bytes) of each LLI buffer allocated for one transfer */ 150/* Size (bytes) of each LLI buffer allocated for one transfer */
161# define PL08X_LLI_TSFR_SIZE 0x2000 151# define PL08X_LLI_TSFR_SIZE 0x2000
162 152
@@ -272,7 +262,6 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
272 writel(val, ch->base + PL080_CH_CONFIG); 262 writel(val, ch->base + PL080_CH_CONFIG);
273} 263}
274 264
275
276/* 265/*
277 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and 266 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
278 * clears any pending interrupt status. This should not be used for 267 * clears any pending interrupt status. This should not be used for
@@ -407,6 +396,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
407 return NULL; 396 return NULL;
408 } 397 }
409 398
399 pm_runtime_get_sync(&pl08x->adev->dev);
410 return ch; 400 return ch;
411} 401}
412 402
@@ -420,6 +410,8 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
420 /* Stop the channel and clear its interrupts */ 410 /* Stop the channel and clear its interrupts */
421 pl08x_terminate_phy_chan(pl08x, ch); 411 pl08x_terminate_phy_chan(pl08x, ch);
422 412
413 pm_runtime_put(&pl08x->adev->dev);
414
423 /* Mark it as free */ 415 /* Mark it as free */
424 ch->serving = NULL; 416 ch->serving = NULL;
425 spin_unlock_irqrestore(&ch->lock, flags); 417 spin_unlock_irqrestore(&ch->lock, flags);
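
The two hunks above introduce runtime PM: the driver takes a reference with pm_runtime_get_sync() when a physical channel is claimed and drops it with pm_runtime_put() when the channel is released. A minimal kernel-side sketch of that pairing, with invented my_claim_channel()/my_release_channel() helpers rather than the driver's own:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/*
 * Illustrative only: hold a runtime-PM reference for as long as a
 * hardware channel is claimed, mirroring the get_sync()/put() pairing
 * added in pl08x_get_phy_channel()/pl08x_put_phy_channel().
 */
static bool my_channel_busy;	/* stand-in for the driver's channel state */

static int my_claim_channel(struct device *dev)
{
	if (my_channel_busy)
		return -EBUSY;

	/* Power the controller up (synchronously) before touching it. */
	pm_runtime_get_sync(dev);
	my_channel_busy = true;
	return 0;
}

static void my_release_channel(struct device *dev)
{
	my_channel_busy = false;
	/* Drop the reference so the controller may runtime-suspend again. */
	pm_runtime_put(dev);
}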
@@ -499,36 +491,30 @@ struct pl08x_lli_build_data {
499}; 491};
500 492
501/* 493/*
502 * Autoselect a master bus to use for the transfer this prefers the 494 * Autoselect a master bus to use for the transfer. Slave will be the chosen as
503 * destination bus if both available if fixed address on one bus the 495 * victim in case src & dest are not similarly aligned. i.e. If after aligning
504 * other will be chosen 496 * masters address with width requirements of transfer (by sending few byte by
497 * byte data), slave is still not aligned, then its width will be reduced to
498 * BYTE.
499 * - prefers the destination bus if both available
500 * - prefers bus with fixed address (i.e. peripheral)
505 */ 501 */
506static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, 502static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
507 struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) 503 struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
508{ 504{
509 if (!(cctl & PL080_CONTROL_DST_INCR)) { 505 if (!(cctl & PL080_CONTROL_DST_INCR)) {
510 *mbus = &bd->srcbus;
511 *sbus = &bd->dstbus;
512 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
513 *mbus = &bd->dstbus; 506 *mbus = &bd->dstbus;
514 *sbus = &bd->srcbus; 507 *sbus = &bd->srcbus;
508 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
509 *mbus = &bd->srcbus;
510 *sbus = &bd->dstbus;
515 } else { 511 } else {
516 if (bd->dstbus.buswidth == 4) { 512 if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
517 *mbus = &bd->dstbus;
518 *sbus = &bd->srcbus;
519 } else if (bd->srcbus.buswidth == 4) {
520 *mbus = &bd->srcbus;
521 *sbus = &bd->dstbus;
522 } else if (bd->dstbus.buswidth == 2) {
523 *mbus = &bd->dstbus; 513 *mbus = &bd->dstbus;
524 *sbus = &bd->srcbus; 514 *sbus = &bd->srcbus;
525 } else if (bd->srcbus.buswidth == 2) { 515 } else {
526 *mbus = &bd->srcbus; 516 *mbus = &bd->srcbus;
527 *sbus = &bd->dstbus; 517 *sbus = &bd->dstbus;
528 } else {
529 /* bd->srcbus.buswidth == 1 */
530 *mbus = &bd->dstbus;
531 *sbus = &bd->srcbus;
532 } 518 }
533 } 519 }
534} 520}
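
The rewritten pl08x_choose_master_bus() reduces to three rules: a fixed-address (non-incrementing, i.e. peripheral) side becomes the master bus, otherwise the wider side does, and the destination wins a tie. A stand-alone model of that decision, using an invented struct bus instead of the driver's struct pl08x_bus_data:

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-in for struct pl08x_bus_data: just what the choice needs. */
struct bus {
	const char *name;
	unsigned int buswidth;	/* bytes per beat */
	bool incrementing;	/* false => fixed peripheral address */
};

/* Mirrors the selection rules in the rewritten pl08x_choose_master_bus(). */
static void choose_master(struct bus *src, struct bus *dst,
			  struct bus **mbus, struct bus **sbus)
{
	if (!dst->incrementing) {		/* fixed destination => master */
		*mbus = dst; *sbus = src;
	} else if (!src->incrementing) {	/* fixed source => master */
		*mbus = src; *sbus = dst;
	} else if (dst->buswidth >= src->buswidth) {
		*mbus = dst; *sbus = src;	/* prefer the wider side, dst on a tie */
	} else {
		*mbus = src; *sbus = dst;
	}
}

int main(void)
{
	struct bus src = { "src", 4, true };
	struct bus dst = { "dst", 2, false };	/* e.g. a peripheral FIFO */
	struct bus *m, *s;

	choose_master(&src, &dst, &m, &s);
	printf("master=%s slave=%s\n", m->name, s->name);
	return 0;
}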
@@ -547,7 +533,8 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
547 llis_va[num_llis].cctl = cctl; 533 llis_va[num_llis].cctl = cctl;
548 llis_va[num_llis].src = bd->srcbus.addr; 534 llis_va[num_llis].src = bd->srcbus.addr;
549 llis_va[num_llis].dst = bd->dstbus.addr; 535 llis_va[num_llis].dst = bd->dstbus.addr;
550 llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); 536 llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
537 sizeof(struct pl08x_lli);
551 llis_va[num_llis].lli |= bd->lli_bus; 538 llis_va[num_llis].lli |= bd->lli_bus;
552 539
553 if (cctl & PL080_CONTROL_SRC_INCR) 540 if (cctl & PL080_CONTROL_SRC_INCR)
@@ -560,16 +547,12 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
560 bd->remainder -= len; 547 bd->remainder -= len;
561} 548}
562 549
563/* 550static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
564 * Return number of bytes to fill to boundary, or len. 551 u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
565 * This calculation works for any value of addr.
566 */
567static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
568{ 552{
569 size_t boundary_len = PL08X_BOUNDARY_SIZE - 553 *cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
570 (addr & (PL08X_BOUNDARY_SIZE - 1)); 554 pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
571 555 (*total_bytes) += len;
572 return min(boundary_len, len);
573} 556}
574 557
575/* 558/*
@@ -583,13 +566,11 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
583 struct pl08x_bus_data *mbus, *sbus; 566 struct pl08x_bus_data *mbus, *sbus;
584 struct pl08x_lli_build_data bd; 567 struct pl08x_lli_build_data bd;
585 int num_llis = 0; 568 int num_llis = 0;
586 u32 cctl; 569 u32 cctl, early_bytes = 0;
587 size_t max_bytes_per_lli; 570 size_t max_bytes_per_lli, total_bytes = 0;
588 size_t total_bytes = 0;
589 struct pl08x_lli *llis_va; 571 struct pl08x_lli *llis_va;
590 572
591 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, 573 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
592 &txd->llis_bus);
593 if (!txd->llis_va) { 574 if (!txd->llis_va) {
594 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); 575 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
595 return 0; 576 return 0;
@@ -619,55 +600,85 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
619 bd.srcbus.buswidth = bd.srcbus.maxwidth; 600 bd.srcbus.buswidth = bd.srcbus.maxwidth;
620 bd.dstbus.buswidth = bd.dstbus.maxwidth; 601 bd.dstbus.buswidth = bd.dstbus.maxwidth;
621 602
622 /*
623 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
624 */
625 max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
626 PL080_CONTROL_TRANSFER_SIZE_MASK;
627
628 /* We need to count this down to zero */ 603 /* We need to count this down to zero */
629 bd.remainder = txd->len; 604 bd.remainder = txd->len;
630 605
631 /*
632 * Choose bus to align to
633 * - prefers destination bus if both available
634 * - if fixed address on one bus chooses other
635 */
636 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); 606 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
637 607
638 dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n", 608 dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
639 bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "", 609 bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
640 bd.srcbus.buswidth, 610 bd.srcbus.buswidth,
641 bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "", 611 bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
642 bd.dstbus.buswidth, 612 bd.dstbus.buswidth,
643 bd.remainder, max_bytes_per_lli); 613 bd.remainder);
644 dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", 614 dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
645 mbus == &bd.srcbus ? "src" : "dst", 615 mbus == &bd.srcbus ? "src" : "dst",
646 sbus == &bd.srcbus ? "src" : "dst"); 616 sbus == &bd.srcbus ? "src" : "dst");
647 617
648 if (txd->len < mbus->buswidth) { 618 /*
649 /* Less than a bus width available - send as single bytes */ 619 * Zero length is only allowed if all these requirements are met:
650 while (bd.remainder) { 620 * - flow controller is peripheral.
651 dev_vdbg(&pl08x->adev->dev, 621 * - src.addr is aligned to src.width
652 "%s single byte LLIs for a transfer of " 622 * - dst.addr is aligned to dst.width
653 "less than a bus width (remain 0x%08x)\n", 623 *
654 __func__, bd.remainder); 624 * sg_len == 1 should be true, as there can be two cases here:
655 cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 625 * - Memory addresses are contiguous and are not scattered. Here, Only
656 pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); 626 * one sg will be passed by user driver, with memory address and zero
657 total_bytes++; 627 * length. We pass this to controller and after the transfer it will
628 * receive the last burst request from peripheral and so transfer
629 * finishes.
630 *
631 * - Memory addresses are scattered and are not contiguous. Here,
632 * Obviously as DMA controller doesn't know when a lli's transfer gets
633 * over, it can't load next lli. So in this case, there has to be an
634 * assumption that only one lli is supported. Thus, we can't have
635 * scattered addresses.
636 */
637 if (!bd.remainder) {
638 u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
639 PL080_CONFIG_FLOW_CONTROL_SHIFT;
640 if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
641 (fc <= PL080_FLOW_SRC2DST_SRC))) {
642 dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
643 __func__);
644 return 0;
658 } 645 }
659 } else { 646
660 /* Make one byte LLIs until master bus is aligned */ 647 if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
661 while ((mbus->addr) % (mbus->buswidth)) { 648 (bd.srcbus.addr % bd.srcbus.buswidth)) {
662 dev_vdbg(&pl08x->adev->dev, 649 dev_err(&pl08x->adev->dev,
663 "%s adjustment lli for less than bus width " 650 "%s src & dst address must be aligned to src"
664 "(remain 0x%08x)\n", 651 " & dst width if peripheral is flow controller",
665 __func__, bd.remainder); 652 __func__);
666 cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 653 return 0;
667 pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
668 total_bytes++;
669 } 654 }
670 655
656 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
657 bd.dstbus.buswidth, 0);
658 pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
659 }
660
661 /*
662 * Send byte by byte for following cases
663 * - Less than a bus width available
664 * - until master bus is aligned
665 */
666 if (bd.remainder < mbus->buswidth)
667 early_bytes = bd.remainder;
668 else if ((mbus->addr) % (mbus->buswidth)) {
669 early_bytes = mbus->buswidth - (mbus->addr) % (mbus->buswidth);
670 if ((bd.remainder - early_bytes) < mbus->buswidth)
671 early_bytes = bd.remainder;
672 }
673
674 if (early_bytes) {
675 dev_vdbg(&pl08x->adev->dev, "%s byte width LLIs "
676 "(remain 0x%08x)\n", __func__, bd.remainder);
677 prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
678 &total_bytes);
679 }
680
681 if (bd.remainder) {
671 /* 682 /*
672 * Master now aligned 683 * Master now aligned
673 * - if slave is not then we must set its width down 684 * - if slave is not then we must set its width down
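
The early_bytes handling above replaces the old byte-at-a-time loops: single-byte LLIs are emitted only until the master bus address is aligned, or for the whole transfer when it is shorter than the master bus width. The arithmetic in isolation, as a plain C sketch with invented names:

#include <stdio.h>

/*
 * Model of the early_bytes computation: how many single-byte transfers
 * are needed before the master bus address is aligned to its width.
 */
static unsigned int early_bytes(unsigned int addr, unsigned int buswidth,
				unsigned int remainder)
{
	unsigned int early = 0;

	if (remainder < buswidth) {
		/* Less than one bus width left: everything goes byte by byte. */
		early = remainder;
	} else if (addr % buswidth) {
		/* Bytes needed to reach the next aligned address ... */
		early = buswidth - (addr % buswidth);
		/* ... unless that would leave less than one bus width behind. */
		if (remainder - early < buswidth)
			early = remainder;
	}
	return early;
}

int main(void)
{
	/* 0x1001 on a 4-byte master bus, 100 bytes to go => 3 early bytes */
	printf("%u\n", early_bytes(0x1001, 4, 100));
	/* already aligned => 0 early bytes */
	printf("%u\n", early_bytes(0x1000, 4, 100));
	return 0;
}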
@@ -680,138 +691,55 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
680 sbus->buswidth = 1; 691 sbus->buswidth = 1;
681 } 692 }
682 693
694 /* Bytes transferred = tsize * src width, not MIN(buswidths) */
695 max_bytes_per_lli = bd.srcbus.buswidth *
696 PL080_CONTROL_TRANSFER_SIZE_MASK;
697
683 /* 698 /*
684 * Make largest possible LLIs until less than one bus 699 * Make largest possible LLIs until less than one bus
685 * width left 700 * width left
686 */ 701 */
687 while (bd.remainder > (mbus->buswidth - 1)) { 702 while (bd.remainder > (mbus->buswidth - 1)) {
688 size_t lli_len, target_len, tsize, odd_bytes; 703 size_t lli_len, tsize, width;
689 704
690 /* 705 /*
691 * If enough left try to send max possible, 706 * If enough left try to send max possible,
692 * otherwise try to send the remainder 707 * otherwise try to send the remainder
693 */ 708 */
694 target_len = min(bd.remainder, max_bytes_per_lli); 709 lli_len = min(bd.remainder, max_bytes_per_lli);
695 710
696 /* 711 /*
697 * Set bus lengths for incrementing buses to the 712 * Check against maximum bus alignment: Calculate actual
698 * number of bytes which fill to next memory boundary, 713 * transfer size in relation to bus width and get a
699 * limiting on the target length calculated above. 714 * maximum remainder of the highest bus width - 1
700 */ 715 */
701 if (cctl & PL080_CONTROL_SRC_INCR) 716 width = max(mbus->buswidth, sbus->buswidth);
702 bd.srcbus.fill_bytes = 717 lli_len = (lli_len / width) * width;
703 pl08x_pre_boundary(bd.srcbus.addr, 718 tsize = lli_len / bd.srcbus.buswidth;
704 target_len);
705 else
706 bd.srcbus.fill_bytes = target_len;
707
708 if (cctl & PL080_CONTROL_DST_INCR)
709 bd.dstbus.fill_bytes =
710 pl08x_pre_boundary(bd.dstbus.addr,
711 target_len);
712 else
713 bd.dstbus.fill_bytes = target_len;
714
715 /* Find the nearest */
716 lli_len = min(bd.srcbus.fill_bytes,
717 bd.dstbus.fill_bytes);
718
719 BUG_ON(lli_len > bd.remainder);
720
721 if (lli_len <= 0) {
722 dev_err(&pl08x->adev->dev,
723 "%s lli_len is %zu, <= 0\n",
724 __func__, lli_len);
725 return 0;
726 }
727
728 if (lli_len == target_len) {
729 /*
730 * Can send what we wanted.
731 * Maintain alignment
732 */
733 lli_len = (lli_len/mbus->buswidth) *
734 mbus->buswidth;
735 odd_bytes = 0;
736 } else {
737 /*
738 * So now we know how many bytes to transfer
739 * to get to the nearest boundary. The next
740 * LLI will past the boundary. However, we
741 * may be working to a boundary on the slave
742 * bus. We need to ensure the master stays
743 * aligned, and that we are working in
744 * multiples of the bus widths.
745 */
746 odd_bytes = lli_len % mbus->buswidth;
747 lli_len -= odd_bytes;
748
749 }
750
751 if (lli_len) {
752 /*
753 * Check against minimum bus alignment:
754 * Calculate actual transfer size in relation
755 * to bus width an get a maximum remainder of
756 * the smallest bus width - 1
757 */
758 /* FIXME: use round_down()? */
759 tsize = lli_len / min(mbus->buswidth,
760 sbus->buswidth);
761 lli_len = tsize * min(mbus->buswidth,
762 sbus->buswidth);
763
764 if (target_len != lli_len) {
765 dev_vdbg(&pl08x->adev->dev,
766 "%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
767 __func__, target_len, lli_len, txd->len);
768 }
769
770 cctl = pl08x_cctl_bits(cctl,
771 bd.srcbus.buswidth,
772 bd.dstbus.buswidth,
773 tsize);
774
775 dev_vdbg(&pl08x->adev->dev,
776 "%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
777 __func__, lli_len, bd.remainder);
778 pl08x_fill_lli_for_desc(&bd, num_llis++,
779 lli_len, cctl);
780 total_bytes += lli_len;
781 }
782 719
783 720 dev_vdbg(&pl08x->adev->dev,
784 if (odd_bytes) { 721 "%s fill lli with single lli chunk of "
785 /* 722 "size 0x%08zx (remainder 0x%08zx)\n",
786 * Creep past the boundary, maintaining 723 __func__, lli_len, bd.remainder);
787 * master alignment 724
788 */ 725 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
789 int j; 726 bd.dstbus.buswidth, tsize);
790 for (j = 0; (j < mbus->buswidth) 727 pl08x_fill_lli_for_desc(&bd, num_llis++, lli_len, cctl);
791 && (bd.remainder); j++) { 728 total_bytes += lli_len;
792 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
793 dev_vdbg(&pl08x->adev->dev,
794 "%s align with boundary, single byte (remain 0x%08zx)\n",
795 __func__, bd.remainder);
796 pl08x_fill_lli_for_desc(&bd,
797 num_llis++, 1, cctl);
798 total_bytes++;
799 }
800 }
801 } 729 }
802 730
803 /* 731 /*
804 * Send any odd bytes 732 * Send any odd bytes
805 */ 733 */
806 while (bd.remainder) { 734 if (bd.remainder) {
807 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
808 dev_vdbg(&pl08x->adev->dev, 735 dev_vdbg(&pl08x->adev->dev,
809 "%s align with boundary, single odd byte (remain %zu)\n", 736 "%s align with boundary, send odd bytes (remain %zu)\n",
810 __func__, bd.remainder); 737 __func__, bd.remainder);
811 pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); 738 prep_byte_width_lli(&bd, &cctl, bd.remainder,
812 total_bytes++; 739 num_llis++, &total_bytes);
813 } 740 }
814 } 741 }
742
815 if (total_bytes != txd->len) { 743 if (total_bytes != txd->len) {
816 dev_err(&pl08x->adev->dev, 744 dev_err(&pl08x->adev->dev,
817 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", 745 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
@@ -917,9 +845,7 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
917 * need, but for slaves the physical signals may be muxed! 845 * need, but for slaves the physical signals may be muxed!
918 * Can the platform allow us to use this channel? 846 * Can the platform allow us to use this channel?
919 */ 847 */
920 if (plchan->slave && 848 if (plchan->slave && pl08x->pd->get_signal) {
921 ch->signal < 0 &&
922 pl08x->pd->get_signal) {
923 ret = pl08x->pd->get_signal(plchan); 849 ret = pl08x->pd->get_signal(plchan);
924 if (ret < 0) { 850 if (ret < 0) {
925 dev_dbg(&pl08x->adev->dev, 851 dev_dbg(&pl08x->adev->dev,
@@ -1008,10 +934,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
1008 * If slaves are relying on interrupts to signal completion this function 934 * If slaves are relying on interrupts to signal completion this function
1009 * must not be called with interrupts disabled. 935 * must not be called with interrupts disabled.
1010 */ 936 */
1011static enum dma_status 937static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
1012pl08x_dma_tx_status(struct dma_chan *chan, 938 dma_cookie_t cookie, struct dma_tx_state *txstate)
1013 dma_cookie_t cookie,
1014 struct dma_tx_state *txstate)
1015{ 939{
1016 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 940 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1017 dma_cookie_t last_used; 941 dma_cookie_t last_used;
@@ -1253,7 +1177,9 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
1253 1177
1254 num_llis = pl08x_fill_llis_for_desc(pl08x, txd); 1178 num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
1255 if (!num_llis) { 1179 if (!num_llis) {
1256 kfree(txd); 1180 spin_lock_irqsave(&plchan->lock, flags);
1181 pl08x_free_txd(pl08x, txd);
1182 spin_unlock_irqrestore(&plchan->lock, flags);
1257 return -EINVAL; 1183 return -EINVAL;
1258 } 1184 }
1259 1185
@@ -1301,7 +1227,7 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
1301static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, 1227static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
1302 unsigned long flags) 1228 unsigned long flags)
1303{ 1229{
1304 struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); 1230 struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
1305 1231
1306 if (txd) { 1232 if (txd) {
1307 dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); 1233 dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
@@ -1367,7 +1293,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1367 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1293 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1368 struct pl08x_driver_data *pl08x = plchan->host; 1294 struct pl08x_driver_data *pl08x = plchan->host;
1369 struct pl08x_txd *txd; 1295 struct pl08x_txd *txd;
1370 int ret; 1296 int ret, tmp;
1371 1297
1372 /* 1298 /*
1373 * Current implementation ASSUMES only one sg 1299 * Current implementation ASSUMES only one sg
@@ -1401,12 +1327,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1401 txd->len = sgl->length; 1327 txd->len = sgl->length;
1402 1328
1403 if (direction == DMA_TO_DEVICE) { 1329 if (direction == DMA_TO_DEVICE) {
1404 txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1405 txd->cctl = plchan->dst_cctl; 1330 txd->cctl = plchan->dst_cctl;
1406 txd->src_addr = sgl->dma_address; 1331 txd->src_addr = sgl->dma_address;
1407 txd->dst_addr = plchan->dst_addr; 1332 txd->dst_addr = plchan->dst_addr;
1408 } else if (direction == DMA_FROM_DEVICE) { 1333 } else if (direction == DMA_FROM_DEVICE) {
1409 txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1410 txd->cctl = plchan->src_cctl; 1334 txd->cctl = plchan->src_cctl;
1411 txd->src_addr = plchan->src_addr; 1335 txd->src_addr = plchan->src_addr;
1412 txd->dst_addr = sgl->dma_address; 1336 txd->dst_addr = sgl->dma_address;
@@ -1416,6 +1340,15 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1416 return NULL; 1340 return NULL;
1417 } 1341 }
1418 1342
1343 if (plchan->cd->device_fc)
1344 tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
1345 PL080_FLOW_PER2MEM_PER;
1346 else
1347 tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
1348 PL080_FLOW_PER2MEM;
1349
1350 txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1351
1419 ret = pl08x_prep_channel_resources(plchan, txd); 1352 ret = pl08x_prep_channel_resources(plchan, txd);
1420 if (ret) 1353 if (ret)
1421 return NULL; 1354 return NULL;
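
The flow-control encoding is no longer hard-coded to the DMAC-as-flow-controller values: when the channel's platform data sets device_fc, the peripheral-as-flow-controller encodings are selected instead. On the platform side that flag would be set roughly like this (a hypothetical board file; everything other than the .bus_id and .device_fc fields is omitted or made up):

#include <linux/amba/pl08x.h>

/*
 * Illustration: a platform marks a slave channel as using the
 * peripheral as flow controller via the new device_fc flag in its
 * channel data.  "mmc0_rx" is a made-up channel name; the remaining
 * fields are board specific.
 */
static struct pl08x_channel_data my_board_dma_channels[] = {
	{
		.bus_id		= "mmc0_rx",
		.device_fc	= true,	/* peripheral signals the last transfer */
		/* .min_signal, .max_signal, .cctl, ... as required */
	},
};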
@@ -1489,9 +1422,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1489 1422
1490bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) 1423bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
1491{ 1424{
1492 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1425 struct pl08x_dma_chan *plchan;
1493 char *name = chan_id; 1426 char *name = chan_id;
1494 1427
1428 /* Reject channels for devices not bound to this driver */
1429 if (chan->device->dev->driver != &pl08x_amba_driver.drv)
1430 return false;
1431
1432 plchan = to_pl08x_chan(chan);
1433
1495 /* Check that the channel is not taken! */ 1434 /* Check that the channel is not taken! */
1496 if (!strcmp(plchan->name, name)) 1435 if (!strcmp(plchan->name, name))
1497 return true; 1436 return true;
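
Since pl08x_filter_id() now rejects channels owned by other DMA drivers, it is safe to hand to dma_request_channel() on systems with more than one DMA engine. A hypothetical client request, with "spi0_tx" standing in for whatever bus_id the platform data defines:

#include <linux/dmaengine.h>
#include <linux/amba/pl08x.h>

/*
 * Sketch of a client asking for a PL08x slave channel by name.
 * Error handling (NULL return) is left to the caller.
 */
static struct dma_chan *my_get_tx_channel(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The filter now ignores channels owned by other DMA drivers. */
	return dma_request_channel(mask, pl08x_filter_id, "spi0_tx");
}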
@@ -1507,13 +1446,7 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
1507 */ 1446 */
1508static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) 1447static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1509{ 1448{
1510 u32 val; 1449 writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
1511
1512 val = readl(pl08x->base + PL080_CONFIG);
1513 val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
1514 /* We implicitly clear bit 1 and that means little-endian mode */
1515 val |= PL080_CONFIG_ENABLE;
1516 writel(val, pl08x->base + PL080_CONFIG);
1517} 1450}
1518 1451
1519static void pl08x_unmap_buffers(struct pl08x_txd *txd) 1452static void pl08x_unmap_buffers(struct pl08x_txd *txd)
@@ -1589,8 +1522,8 @@ static void pl08x_tasklet(unsigned long data)
1589 */ 1522 */
1590 list_for_each_entry(waiting, &pl08x->memcpy.channels, 1523 list_for_each_entry(waiting, &pl08x->memcpy.channels,
1591 chan.device_node) { 1524 chan.device_node) {
1592 if (waiting->state == PL08X_CHAN_WAITING && 1525 if (waiting->state == PL08X_CHAN_WAITING &&
1593 waiting->waiting != NULL) { 1526 waiting->waiting != NULL) {
1594 int ret; 1527 int ret;
1595 1528
1596 /* This should REALLY not fail now */ 1529 /* This should REALLY not fail now */
@@ -1630,38 +1563,40 @@ static void pl08x_tasklet(unsigned long data)
1630static irqreturn_t pl08x_irq(int irq, void *dev) 1563static irqreturn_t pl08x_irq(int irq, void *dev)
1631{ 1564{
1632 struct pl08x_driver_data *pl08x = dev; 1565 struct pl08x_driver_data *pl08x = dev;
1633 u32 mask = 0; 1566 u32 mask = 0, err, tc, i;
1634 u32 val; 1567
1635 int i; 1568 /* check & clear - ERR & TC interrupts */
1636 1569 err = readl(pl08x->base + PL080_ERR_STATUS);
1637 val = readl(pl08x->base + PL080_ERR_STATUS); 1570 if (err) {
1638 if (val) { 1571 dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
1639 /* An error interrupt (on one or more channels) */ 1572 __func__, err);
1640 dev_err(&pl08x->adev->dev, 1573 writel(err, pl08x->base + PL080_ERR_CLEAR);
1641 "%s error interrupt, register value 0x%08x\n",
1642 __func__, val);
1643 /*
1644 * Simply clear ALL PL08X error interrupts,
1645 * regardless of channel and cause
1646 * FIXME: should be 0x00000003 on PL081 really.
1647 */
1648 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
1649 } 1574 }
1650 val = readl(pl08x->base + PL080_INT_STATUS); 1575 tc = readl(pl08x->base + PL080_INT_STATUS);
1576 if (tc)
1577 writel(tc, pl08x->base + PL080_TC_CLEAR);
1578
1579 if (!err && !tc)
1580 return IRQ_NONE;
1581
1651 for (i = 0; i < pl08x->vd->channels; i++) { 1582 for (i = 0; i < pl08x->vd->channels; i++) {
1652 if ((1 << i) & val) { 1583 if (((1 << i) & err) || ((1 << i) & tc)) {
1653 /* Locate physical channel */ 1584 /* Locate physical channel */
1654 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; 1585 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
1655 struct pl08x_dma_chan *plchan = phychan->serving; 1586 struct pl08x_dma_chan *plchan = phychan->serving;
1656 1587
1588 if (!plchan) {
1589 dev_err(&pl08x->adev->dev,
1590 "%s Error TC interrupt on unused channel: 0x%08x\n",
1591 __func__, i);
1592 continue;
1593 }
1594
1657 /* Schedule tasklet on this channel */ 1595 /* Schedule tasklet on this channel */
1658 tasklet_schedule(&plchan->tasklet); 1596 tasklet_schedule(&plchan->tasklet);
1659
1660 mask |= (1 << i); 1597 mask |= (1 << i);
1661 } 1598 }
1662 } 1599 }
1663 /* Clear only the terminal interrupts on channels we processed */
1664 writel(mask, pl08x->base + PL080_TC_CLEAR);
1665 1600
1666 return mask ? IRQ_HANDLED : IRQ_NONE; 1601 return mask ? IRQ_HANDLED : IRQ_NONE;
1667} 1602}
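
The reworked pl08x_irq() reads the error and terminal-count status once, clears both up front, and then walks the per-channel bits of the two masks together. The scan itself is plain bit arithmetic; a stand-alone model with made-up register values:

#include <stdio.h>

/*
 * Stand-alone model of the per-channel scan in the new pl08x_irq():
 * a channel needs servicing if its bit is set in either the error or
 * the terminal-count status word.
 */
static unsigned int scan_channels(unsigned int err, unsigned int tc,
				  unsigned int nr_channels)
{
	unsigned int i, handled = 0;

	for (i = 0; i < nr_channels; i++) {
		if (((1u << i) & err) || ((1u << i) & tc)) {
			printf("channel %u: err=%d tc=%d\n", i,
			       !!(err & (1u << i)), !!(tc & (1u << i)));
			handled |= 1u << i;
		}
	}
	return handled;	/* non-zero => IRQ_HANDLED in the real driver */
}

int main(void)
{
	/* e.g. error on channel 0, terminal count on channel 2, 8 channels */
	printf("handled mask 0x%x\n", scan_channels(0x1, 0x4, 8));
	return 0;
}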
@@ -1685,9 +1620,7 @@ static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
1685 * Make a local wrapper to hold required data 1620 * Make a local wrapper to hold required data
1686 */ 1621 */
1687static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, 1622static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1688 struct dma_device *dmadev, 1623 struct dma_device *dmadev, unsigned int channels, bool slave)
1689 unsigned int channels,
1690 bool slave)
1691{ 1624{
1692 struct pl08x_dma_chan *chan; 1625 struct pl08x_dma_chan *chan;
1693 int i; 1626 int i;
@@ -1700,7 +1633,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1700 * to cope with that situation. 1633 * to cope with that situation.
1701 */ 1634 */
1702 for (i = 0; i < channels; i++) { 1635 for (i = 0; i < channels; i++) {
1703 chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL); 1636 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1704 if (!chan) { 1637 if (!chan) {
1705 dev_err(&pl08x->adev->dev, 1638 dev_err(&pl08x->adev->dev,
1706 "%s no memory for channel\n", __func__); 1639 "%s no memory for channel\n", __func__);
@@ -1728,7 +1661,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1728 kfree(chan); 1661 kfree(chan);
1729 continue; 1662 continue;
1730 } 1663 }
1731 dev_info(&pl08x->adev->dev, 1664 dev_dbg(&pl08x->adev->dev,
1732 "initialize virtual channel \"%s\"\n", 1665 "initialize virtual channel \"%s\"\n",
1733 chan->name); 1666 chan->name);
1734 1667
@@ -1837,9 +1770,9 @@ static const struct file_operations pl08x_debugfs_operations = {
1837static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 1770static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1838{ 1771{
1839 /* Expose a simple debugfs interface to view all clocks */ 1772 /* Expose a simple debugfs interface to view all clocks */
1840 (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, 1773 (void) debugfs_create_file(dev_name(&pl08x->adev->dev),
1841 NULL, pl08x, 1774 S_IFREG | S_IRUGO, NULL, pl08x,
1842 &pl08x_debugfs_operations); 1775 &pl08x_debugfs_operations);
1843} 1776}
1844 1777
1845#else 1778#else
@@ -1860,12 +1793,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1860 return ret; 1793 return ret;
1861 1794
1862 /* Create the driver state holder */ 1795 /* Create the driver state holder */
1863 pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL); 1796 pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
1864 if (!pl08x) { 1797 if (!pl08x) {
1865 ret = -ENOMEM; 1798 ret = -ENOMEM;
1866 goto out_no_pl08x; 1799 goto out_no_pl08x;
1867 } 1800 }
1868 1801
1802 pm_runtime_set_active(&adev->dev);
1803 pm_runtime_enable(&adev->dev);
1804
1869 /* Initialize memcpy engine */ 1805 /* Initialize memcpy engine */
1870 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); 1806 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
1871 pl08x->memcpy.dev = &adev->dev; 1807 pl08x->memcpy.dev = &adev->dev;
@@ -1939,7 +1875,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1939 } 1875 }
1940 1876
1941 /* Initialize physical channels */ 1877 /* Initialize physical channels */
1942 pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)), 1878 pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
1943 GFP_KERNEL); 1879 GFP_KERNEL);
1944 if (!pl08x->phy_chans) { 1880 if (!pl08x->phy_chans) {
1945 dev_err(&adev->dev, "%s failed to allocate " 1881 dev_err(&adev->dev, "%s failed to allocate "
@@ -1956,9 +1892,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1956 spin_lock_init(&ch->lock); 1892 spin_lock_init(&ch->lock);
1957 ch->serving = NULL; 1893 ch->serving = NULL;
1958 ch->signal = -1; 1894 ch->signal = -1;
1959 dev_info(&adev->dev, 1895 dev_dbg(&adev->dev, "physical channel %d is %s\n",
1960 "physical channel %d is %s\n", i, 1896 i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
1961 pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
1962 } 1897 }
1963 1898
1964 /* Register as many memcpy channels as there are physical channels */ 1899 /* Register as many memcpy channels as there are physical channels */
@@ -1974,8 +1909,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1974 1909
1975 /* Register slave channels */ 1910 /* Register slave channels */
1976 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, 1911 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
1977 pl08x->pd->num_slave_channels, 1912 pl08x->pd->num_slave_channels, true);
1978 true);
1979 if (ret <= 0) { 1913 if (ret <= 0) {
1980 dev_warn(&pl08x->adev->dev, 1914 dev_warn(&pl08x->adev->dev,
1981 "%s failed to enumerate slave channels - %d\n", 1915 "%s failed to enumerate slave channels - %d\n",
@@ -2005,6 +1939,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2005 dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", 1939 dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
2006 amba_part(adev), amba_rev(adev), 1940 amba_part(adev), amba_rev(adev),
2007 (unsigned long long)adev->res.start, adev->irq[0]); 1941 (unsigned long long)adev->res.start, adev->irq[0]);
1942
1943 pm_runtime_put(&adev->dev);
2008 return 0; 1944 return 0;
2009 1945
2010out_no_slave_reg: 1946out_no_slave_reg:
@@ -2023,6 +1959,9 @@ out_no_ioremap:
2023 dma_pool_destroy(pl08x->pool); 1959 dma_pool_destroy(pl08x->pool);
2024out_no_lli_pool: 1960out_no_lli_pool:
2025out_no_platdata: 1961out_no_platdata:
1962 pm_runtime_put(&adev->dev);
1963 pm_runtime_disable(&adev->dev);
1964
2026 kfree(pl08x); 1965 kfree(pl08x);
2027out_no_pl08x: 1966out_no_pl08x:
2028 amba_release_regions(adev); 1967 amba_release_regions(adev);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 6a483eac7b3f..3b99dc62874b 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -107,10 +107,11 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
107{ 107{
108 struct at_desc *desc, *_desc; 108 struct at_desc *desc, *_desc;
109 struct at_desc *ret = NULL; 109 struct at_desc *ret = NULL;
110 unsigned long flags;
110 unsigned int i = 0; 111 unsigned int i = 0;
111 LIST_HEAD(tmp_list); 112 LIST_HEAD(tmp_list);
112 113
113 spin_lock_bh(&atchan->lock); 114 spin_lock_irqsave(&atchan->lock, flags);
114 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { 115 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
115 i++; 116 i++;
116 if (async_tx_test_ack(&desc->txd)) { 117 if (async_tx_test_ack(&desc->txd)) {
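
From here on, at_hdmac.c converts its spin_lock_bh()/spin_unlock_bh() pairs to the irqsave variants, so the descriptor lists stay protected regardless of the context the dmaengine calls arrive from. The pattern, as a minimal kernel-style sketch:

#include <linux/spinlock.h>

/*
 * spin_lock_irqsave() works in process, softirq and hardirq context
 * alike, whereas spin_lock_bh() must not be used with interrupts
 * already disabled.
 */
static DEFINE_SPINLOCK(my_lock);

static void my_touch_lists(void)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	/* ... manipulate descriptor lists ... */
	spin_unlock_irqrestore(&my_lock, flags);
}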
@@ -121,7 +122,7 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
121 dev_dbg(chan2dev(&atchan->chan_common), 122 dev_dbg(chan2dev(&atchan->chan_common),
122 "desc %p not ACKed\n", desc); 123 "desc %p not ACKed\n", desc);
123 } 124 }
124 spin_unlock_bh(&atchan->lock); 125 spin_unlock_irqrestore(&atchan->lock, flags);
125 dev_vdbg(chan2dev(&atchan->chan_common), 126 dev_vdbg(chan2dev(&atchan->chan_common),
126 "scanned %u descriptors on freelist\n", i); 127 "scanned %u descriptors on freelist\n", i);
127 128
@@ -129,9 +130,9 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
129 if (!ret) { 130 if (!ret) {
130 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC); 131 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
131 if (ret) { 132 if (ret) {
132 spin_lock_bh(&atchan->lock); 133 spin_lock_irqsave(&atchan->lock, flags);
133 atchan->descs_allocated++; 134 atchan->descs_allocated++;
134 spin_unlock_bh(&atchan->lock); 135 spin_unlock_irqrestore(&atchan->lock, flags);
135 } else { 136 } else {
136 dev_err(chan2dev(&atchan->chan_common), 137 dev_err(chan2dev(&atchan->chan_common),
137 "not enough descriptors available\n"); 138 "not enough descriptors available\n");
@@ -150,8 +151,9 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
150{ 151{
151 if (desc) { 152 if (desc) {
152 struct at_desc *child; 153 struct at_desc *child;
154 unsigned long flags;
153 155
154 spin_lock_bh(&atchan->lock); 156 spin_lock_irqsave(&atchan->lock, flags);
155 list_for_each_entry(child, &desc->tx_list, desc_node) 157 list_for_each_entry(child, &desc->tx_list, desc_node)
156 dev_vdbg(chan2dev(&atchan->chan_common), 158 dev_vdbg(chan2dev(&atchan->chan_common),
157 "moving child desc %p to freelist\n", 159 "moving child desc %p to freelist\n",
@@ -160,7 +162,7 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
160 dev_vdbg(chan2dev(&atchan->chan_common), 162 dev_vdbg(chan2dev(&atchan->chan_common),
161 "moving desc %p to freelist\n", desc); 163 "moving desc %p to freelist\n", desc);
162 list_add(&desc->desc_node, &atchan->free_list); 164 list_add(&desc->desc_node, &atchan->free_list);
163 spin_unlock_bh(&atchan->lock); 165 spin_unlock_irqrestore(&atchan->lock, flags);
164 } 166 }
165} 167}
166 168
@@ -299,7 +301,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
299 301
300 /* for cyclic transfers, 302 /* for cyclic transfers,
301 * no need to replay callback function while stopping */ 303 * no need to replay callback function while stopping */
302 if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) { 304 if (!atc_chan_is_cyclic(atchan)) {
303 dma_async_tx_callback callback = txd->callback; 305 dma_async_tx_callback callback = txd->callback;
304 void *param = txd->callback_param; 306 void *param = txd->callback_param;
305 307
@@ -471,16 +473,17 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
471static void atc_tasklet(unsigned long data) 473static void atc_tasklet(unsigned long data)
472{ 474{
473 struct at_dma_chan *atchan = (struct at_dma_chan *)data; 475 struct at_dma_chan *atchan = (struct at_dma_chan *)data;
476 unsigned long flags;
474 477
475 spin_lock(&atchan->lock); 478 spin_lock_irqsave(&atchan->lock, flags);
476 if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) 479 if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
477 atc_handle_error(atchan); 480 atc_handle_error(atchan);
478 else if (test_bit(ATC_IS_CYCLIC, &atchan->status)) 481 else if (atc_chan_is_cyclic(atchan))
479 atc_handle_cyclic(atchan); 482 atc_handle_cyclic(atchan);
480 else 483 else
481 atc_advance_work(atchan); 484 atc_advance_work(atchan);
482 485
483 spin_unlock(&atchan->lock); 486 spin_unlock_irqrestore(&atchan->lock, flags);
484} 487}
485 488
486static irqreturn_t at_dma_interrupt(int irq, void *dev_id) 489static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@@ -539,8 +542,9 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
539 struct at_desc *desc = txd_to_at_desc(tx); 542 struct at_desc *desc = txd_to_at_desc(tx);
540 struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); 543 struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
541 dma_cookie_t cookie; 544 dma_cookie_t cookie;
545 unsigned long flags;
542 546
543 spin_lock_bh(&atchan->lock); 547 spin_lock_irqsave(&atchan->lock, flags);
544 cookie = atc_assign_cookie(atchan, desc); 548 cookie = atc_assign_cookie(atchan, desc);
545 549
546 if (list_empty(&atchan->active_list)) { 550 if (list_empty(&atchan->active_list)) {
@@ -554,7 +558,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
554 list_add_tail(&desc->desc_node, &atchan->queue); 558 list_add_tail(&desc->desc_node, &atchan->queue);
555 } 559 }
556 560
557 spin_unlock_bh(&atchan->lock); 561 spin_unlock_irqrestore(&atchan->lock, flags);
558 562
559 return cookie; 563 return cookie;
560} 564}
@@ -927,28 +931,29 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
927 struct at_dma_chan *atchan = to_at_dma_chan(chan); 931 struct at_dma_chan *atchan = to_at_dma_chan(chan);
928 struct at_dma *atdma = to_at_dma(chan->device); 932 struct at_dma *atdma = to_at_dma(chan->device);
929 int chan_id = atchan->chan_common.chan_id; 933 int chan_id = atchan->chan_common.chan_id;
934 unsigned long flags;
930 935
931 LIST_HEAD(list); 936 LIST_HEAD(list);
932 937
933 dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); 938 dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
934 939
935 if (cmd == DMA_PAUSE) { 940 if (cmd == DMA_PAUSE) {
936 spin_lock_bh(&atchan->lock); 941 spin_lock_irqsave(&atchan->lock, flags);
937 942
938 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); 943 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
939 set_bit(ATC_IS_PAUSED, &atchan->status); 944 set_bit(ATC_IS_PAUSED, &atchan->status);
940 945
941 spin_unlock_bh(&atchan->lock); 946 spin_unlock_irqrestore(&atchan->lock, flags);
942 } else if (cmd == DMA_RESUME) { 947 } else if (cmd == DMA_RESUME) {
943 if (!test_bit(ATC_IS_PAUSED, &atchan->status)) 948 if (!atc_chan_is_paused(atchan))
944 return 0; 949 return 0;
945 950
946 spin_lock_bh(&atchan->lock); 951 spin_lock_irqsave(&atchan->lock, flags);
947 952
948 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); 953 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
949 clear_bit(ATC_IS_PAUSED, &atchan->status); 954 clear_bit(ATC_IS_PAUSED, &atchan->status);
950 955
951 spin_unlock_bh(&atchan->lock); 956 spin_unlock_irqrestore(&atchan->lock, flags);
952 } else if (cmd == DMA_TERMINATE_ALL) { 957 } else if (cmd == DMA_TERMINATE_ALL) {
953 struct at_desc *desc, *_desc; 958 struct at_desc *desc, *_desc;
954 /* 959 /*
@@ -957,7 +962,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
957 * channel. We still have to poll the channel enable bit due 962 * channel. We still have to poll the channel enable bit due
958 * to AHB/HSB limitations. 963 * to AHB/HSB limitations.
959 */ 964 */
960 spin_lock_bh(&atchan->lock); 965 spin_lock_irqsave(&atchan->lock, flags);
961 966
962 /* disabling channel: must also remove suspend state */ 967 /* disabling channel: must also remove suspend state */
963 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); 968 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
@@ -978,7 +983,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
978 /* if channel dedicated to cyclic operations, free it */ 983 /* if channel dedicated to cyclic operations, free it */
979 clear_bit(ATC_IS_CYCLIC, &atchan->status); 984 clear_bit(ATC_IS_CYCLIC, &atchan->status);
980 985
981 spin_unlock_bh(&atchan->lock); 986 spin_unlock_irqrestore(&atchan->lock, flags);
982 } else { 987 } else {
983 return -ENXIO; 988 return -ENXIO;
984 } 989 }
@@ -1004,9 +1009,10 @@ atc_tx_status(struct dma_chan *chan,
1004 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1009 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1005 dma_cookie_t last_used; 1010 dma_cookie_t last_used;
1006 dma_cookie_t last_complete; 1011 dma_cookie_t last_complete;
1012 unsigned long flags;
1007 enum dma_status ret; 1013 enum dma_status ret;
1008 1014
1009 spin_lock_bh(&atchan->lock); 1015 spin_lock_irqsave(&atchan->lock, flags);
1010 1016
1011 last_complete = atchan->completed_cookie; 1017 last_complete = atchan->completed_cookie;
1012 last_used = chan->cookie; 1018 last_used = chan->cookie;
@@ -1021,7 +1027,7 @@ atc_tx_status(struct dma_chan *chan,
1021 ret = dma_async_is_complete(cookie, last_complete, last_used); 1027 ret = dma_async_is_complete(cookie, last_complete, last_used);
1022 } 1028 }
1023 1029
1024 spin_unlock_bh(&atchan->lock); 1030 spin_unlock_irqrestore(&atchan->lock, flags);
1025 1031
1026 if (ret != DMA_SUCCESS) 1032 if (ret != DMA_SUCCESS)
1027 dma_set_tx_state(txstate, last_complete, last_used, 1033 dma_set_tx_state(txstate, last_complete, last_used,
@@ -1029,7 +1035,7 @@ atc_tx_status(struct dma_chan *chan,
1029 else 1035 else
1030 dma_set_tx_state(txstate, last_complete, last_used, 0); 1036 dma_set_tx_state(txstate, last_complete, last_used, 0);
1031 1037
1032 if (test_bit(ATC_IS_PAUSED, &atchan->status)) 1038 if (atc_chan_is_paused(atchan))
1033 ret = DMA_PAUSED; 1039 ret = DMA_PAUSED;
1034 1040
1035 dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n", 1041 dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
@@ -1046,18 +1052,19 @@ atc_tx_status(struct dma_chan *chan,
1046static void atc_issue_pending(struct dma_chan *chan) 1052static void atc_issue_pending(struct dma_chan *chan)
1047{ 1053{
1048 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1054 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1055 unsigned long flags;
1049 1056
1050 dev_vdbg(chan2dev(chan), "issue_pending\n"); 1057 dev_vdbg(chan2dev(chan), "issue_pending\n");
1051 1058
1052 /* Not needed for cyclic transfers */ 1059 /* Not needed for cyclic transfers */
1053 if (test_bit(ATC_IS_CYCLIC, &atchan->status)) 1060 if (atc_chan_is_cyclic(atchan))
1054 return; 1061 return;
1055 1062
1056 spin_lock_bh(&atchan->lock); 1063 spin_lock_irqsave(&atchan->lock, flags);
1057 if (!atc_chan_is_enabled(atchan)) { 1064 if (!atc_chan_is_enabled(atchan)) {
1058 atc_advance_work(atchan); 1065 atc_advance_work(atchan);
1059 } 1066 }
1060 spin_unlock_bh(&atchan->lock); 1067 spin_unlock_irqrestore(&atchan->lock, flags);
1061} 1068}
1062 1069
1063/** 1070/**
@@ -1073,6 +1080,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
1073 struct at_dma *atdma = to_at_dma(chan->device); 1080 struct at_dma *atdma = to_at_dma(chan->device);
1074 struct at_desc *desc; 1081 struct at_desc *desc;
1075 struct at_dma_slave *atslave; 1082 struct at_dma_slave *atslave;
1083 unsigned long flags;
1076 int i; 1084 int i;
1077 u32 cfg; 1085 u32 cfg;
1078 LIST_HEAD(tmp_list); 1086 LIST_HEAD(tmp_list);
@@ -1116,11 +1124,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
1116 list_add_tail(&desc->desc_node, &tmp_list); 1124 list_add_tail(&desc->desc_node, &tmp_list);
1117 } 1125 }
1118 1126
1119 spin_lock_bh(&atchan->lock); 1127 spin_lock_irqsave(&atchan->lock, flags);
1120 atchan->descs_allocated = i; 1128 atchan->descs_allocated = i;
1121 list_splice(&tmp_list, &atchan->free_list); 1129 list_splice(&tmp_list, &atchan->free_list);
1122 atchan->completed_cookie = chan->cookie = 1; 1130 atchan->completed_cookie = chan->cookie = 1;
1123 spin_unlock_bh(&atchan->lock); 1131 spin_unlock_irqrestore(&atchan->lock, flags);
1124 1132
1125 /* channel parameters */ 1133 /* channel parameters */
1126 channel_writel(atchan, CFG, cfg); 1134 channel_writel(atchan, CFG, cfg);
@@ -1293,15 +1301,13 @@ static int __init at_dma_probe(struct platform_device *pdev)
1293 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) 1301 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1294 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; 1302 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1295 1303
1296 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) 1304 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1297 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; 1305 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1298 1306 /* controller can do slave DMA: can trigger cyclic transfers */
1299 if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask)) 1307 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
1300 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; 1308 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1301
1302 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
1303 dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
1304 atdma->dma_common.device_control = atc_control; 1309 atdma->dma_common.device_control = atc_control;
1310 }
1305 1311
1306 dma_writel(atdma, EN, AT_DMA_ENABLE); 1312 dma_writel(atdma, EN, AT_DMA_ENABLE);
1307 1313
@@ -1377,27 +1383,112 @@ static void at_dma_shutdown(struct platform_device *pdev)
1377 clk_disable(atdma->clk); 1383 clk_disable(atdma->clk);
1378} 1384}
1379 1385
1386static int at_dma_prepare(struct device *dev)
1387{
1388 struct platform_device *pdev = to_platform_device(dev);
1389 struct at_dma *atdma = platform_get_drvdata(pdev);
1390 struct dma_chan *chan, *_chan;
1391
1392 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1393 device_node) {
1394 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1395 /* wait for transaction completion (except in cyclic case) */
1396 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
1397 return -EAGAIN;
1398 }
1399 return 0;
1400}
1401
1402static void atc_suspend_cyclic(struct at_dma_chan *atchan)
1403{
1404 struct dma_chan *chan = &atchan->chan_common;
1405
1406 /* Channel should be paused by user
1407 * do it anyway even if it is not done already */
1408 if (!atc_chan_is_paused(atchan)) {
1409 dev_warn(chan2dev(chan),
1410 "cyclic channel not paused, should be done by channel user\n");
1411 atc_control(chan, DMA_PAUSE, 0);
1412 }
1413
1414 /* now preserve additional data for cyclic operations */
1415 /* next descriptor address in the cyclic list */
1416 atchan->save_dscr = channel_readl(atchan, DSCR);
1417
1418 vdbg_dump_regs(atchan);
1419}
1420
1380static int at_dma_suspend_noirq(struct device *dev) 1421static int at_dma_suspend_noirq(struct device *dev)
1381{ 1422{
1382 struct platform_device *pdev = to_platform_device(dev); 1423 struct platform_device *pdev = to_platform_device(dev);
1383 struct at_dma *atdma = platform_get_drvdata(pdev); 1424 struct at_dma *atdma = platform_get_drvdata(pdev);
1425 struct dma_chan *chan, *_chan;
1384 1426
1385 at_dma_off(platform_get_drvdata(pdev)); 1427 /* preserve data */
1428 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1429 device_node) {
1430 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1431
1432 if (atc_chan_is_cyclic(atchan))
1433 atc_suspend_cyclic(atchan);
1434 atchan->save_cfg = channel_readl(atchan, CFG);
1435 }
1436 atdma->save_imr = dma_readl(atdma, EBCIMR);
1437
1438 /* disable DMA controller */
1439 at_dma_off(atdma);
1386 clk_disable(atdma->clk); 1440 clk_disable(atdma->clk);
1387 return 0; 1441 return 0;
1388} 1442}
1389 1443
1444static void atc_resume_cyclic(struct at_dma_chan *atchan)
1445{
1446 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
1447
1448 /* restore channel status for cyclic descriptors list:
1449 * next descriptor in the cyclic list at the time of suspend */
1450 channel_writel(atchan, SADDR, 0);
1451 channel_writel(atchan, DADDR, 0);
1452 channel_writel(atchan, CTRLA, 0);
1453 channel_writel(atchan, CTRLB, 0);
1454 channel_writel(atchan, DSCR, atchan->save_dscr);
1455 dma_writel(atdma, CHER, atchan->mask);
1456
1457 /* channel pause status should be removed by channel user
1458 * We cannot take the initiative to do it here */
1459
1460 vdbg_dump_regs(atchan);
1461}
1462
1390static int at_dma_resume_noirq(struct device *dev) 1463static int at_dma_resume_noirq(struct device *dev)
1391{ 1464{
1392 struct platform_device *pdev = to_platform_device(dev); 1465 struct platform_device *pdev = to_platform_device(dev);
1393 struct at_dma *atdma = platform_get_drvdata(pdev); 1466 struct at_dma *atdma = platform_get_drvdata(pdev);
1467 struct dma_chan *chan, *_chan;
1394 1468
1469 /* bring back DMA controller */
1395 clk_enable(atdma->clk); 1470 clk_enable(atdma->clk);
1396 dma_writel(atdma, EN, AT_DMA_ENABLE); 1471 dma_writel(atdma, EN, AT_DMA_ENABLE);
1472
1473 /* clear any pending interrupt */
1474 while (dma_readl(atdma, EBCISR))
1475 cpu_relax();
1476
1477 /* restore saved data */
1478 dma_writel(atdma, EBCIER, atdma->save_imr);
1479 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1480 device_node) {
1481 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1482
1483 channel_writel(atchan, CFG, atchan->save_cfg);
1484 if (atc_chan_is_cyclic(atchan))
1485 atc_resume_cyclic(atchan);
1486 }
1397 return 0; 1487 return 0;
1398} 1488}
1399 1489
1400static const struct dev_pm_ops at_dma_dev_pm_ops = { 1490static const struct dev_pm_ops at_dma_dev_pm_ops = {
1491 .prepare = at_dma_prepare,
1401 .suspend_noirq = at_dma_suspend_noirq, 1492 .suspend_noirq = at_dma_suspend_noirq,
1402 .resume_noirq = at_dma_resume_noirq, 1493 .resume_noirq = at_dma_resume_noirq,
1403}; 1494};
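
atc_suspend_cyclic() warns when a cyclic channel reaches suspend without having been paused; the intent is that the peripheral driver using the channel pauses it from its own suspend path. A hypothetical client-side sketch, using the raw device_control hook (the same interface dmatest uses below):

#include <linux/dmaengine.h>

/*
 * Sketch: a driver running a cyclic DMA transfer pauses it before
 * system suspend and resumes it afterwards.  'chan' is the channel the
 * driver obtained earlier; error handling is omitted.
 */
static int my_client_suspend(struct dma_chan *chan)
{
	return chan->device->device_control(chan, DMA_PAUSE, 0);
}

static int my_client_resume(struct dma_chan *chan)
{
	return chan->device->device_control(chan, DMA_RESUME, 0);
}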
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 087dbf1dd39c..aa4c9aebab7c 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -204,6 +204,9 @@ enum atc_status {
204 * @status: transmit status information from irq/prep* functions 204 * @status: transmit status information from irq/prep* functions
205 * to tasklet (use atomic operations) 205 * to tasklet (use atomic operations)
206 * @tasklet: bottom half to finish transaction work 206 * @tasklet: bottom half to finish transaction work
207 * @save_cfg: configuration register that is saved on suspend/resume cycle
208 * @save_dscr: for cyclic operations, preserve next descriptor address in
209 * the cyclic list on suspend/resume cycle
207 * @lock: serializes enqueue/dequeue operations to descriptors lists 210 * @lock: serializes enqueue/dequeue operations to descriptors lists
208 * @completed_cookie: identifier for the most recently completed operation 211 * @completed_cookie: identifier for the most recently completed operation
209 * @active_list: list of descriptors dmaengine is being running on 212 * @active_list: list of descriptors dmaengine is being running on
@@ -218,6 +221,8 @@ struct at_dma_chan {
218 u8 mask; 221 u8 mask;
219 unsigned long status; 222 unsigned long status;
220 struct tasklet_struct tasklet; 223 struct tasklet_struct tasklet;
224 u32 save_cfg;
225 u32 save_dscr;
221 226
222 spinlock_t lock; 227 spinlock_t lock;
223 228
@@ -248,6 +253,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
248 * @chan_common: common dmaengine dma_device object members 253 * @chan_common: common dmaengine dma_device object members
249 * @ch_regs: memory mapped register base 254 * @ch_regs: memory mapped register base
250 * @clk: dma controller clock 255 * @clk: dma controller clock
256 * @save_imr: interrupt mask register that is saved on suspend/resume cycle
251 * @all_chan_mask: all channels availlable in a mask 257 * @all_chan_mask: all channels availlable in a mask
252 * @dma_desc_pool: base of DMA descriptor region (DMA address) 258 * @dma_desc_pool: base of DMA descriptor region (DMA address)
253 * @chan: channels table to store at_dma_chan structures 259 * @chan: channels table to store at_dma_chan structures
@@ -256,6 +262,7 @@ struct at_dma {
256 struct dma_device dma_common; 262 struct dma_device dma_common;
257 void __iomem *regs; 263 void __iomem *regs;
258 struct clk *clk; 264 struct clk *clk;
265 u32 save_imr;
259 266
260 u8 all_chan_mask; 267 u8 all_chan_mask;
261 268
@@ -355,6 +362,23 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
355 return !!(dma_readl(atdma, CHSR) & atchan->mask); 362 return !!(dma_readl(atdma, CHSR) & atchan->mask);
356} 363}
357 364
365/**
366 * atc_chan_is_paused - test channel pause/resume status
367 * @atchan: channel we want to test status
368 */
369static inline int atc_chan_is_paused(struct at_dma_chan *atchan)
370{
371 return test_bit(ATC_IS_PAUSED, &atchan->status);
372}
373
374/**
375 * atc_chan_is_cyclic - test if given channel has cyclic property set
376 * @atchan: channel we want to test status
377 */
378static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan)
379{
380 return test_bit(ATC_IS_CYCLIC, &atchan->status);
381}
358 382
359/** 383/**
360 * set_desc_eol - set end-of-link to descriptor so it will end transfer 384 * set_desc_eol - set end-of-link to descriptor so it will end transfer
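
[editor's note] The two new helpers just wrap test_bit() on the channel's status word, which the kernel-doc above says must be manipulated with atomic operations. A small illustrative sketch of the producing side (not copied from the driver): the flag is set when a cyclic transfer is prepared and cleared when the channel is torn down, after which the predicate drives paths such as the resume code earlier in this patch.

	/* when a cyclic transfer is prepared on the channel */
	set_bit(ATC_IS_CYCLIC, &atchan->status);

	/* when the channel is terminated / torn down */
	clear_bit(ATC_IS_CYCLIC, &atchan->status);

	/* consumers branch through the helper instead of open-coding test_bit() */
	if (atc_chan_is_cyclic(atchan))
		atc_resume_cyclic(atchan);
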
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 765f5ff22304..eb1d8641cf5c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -10,6 +10,7 @@
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/dma-mapping.h> 11#include <linux/dma-mapping.h>
12#include <linux/dmaengine.h> 12#include <linux/dmaengine.h>
13#include <linux/freezer.h>
13#include <linux/init.h> 14#include <linux/init.h>
14#include <linux/kthread.h> 15#include <linux/kthread.h>
15#include <linux/module.h> 16#include <linux/module.h>
@@ -251,6 +252,7 @@ static int dmatest_func(void *data)
251 int i; 252 int i;
252 253
253 thread_name = current->comm; 254 thread_name = current->comm;
255 set_freezable_with_signal();
254 256
255 ret = -ENOMEM; 257 ret = -ENOMEM;
256 258
@@ -305,7 +307,8 @@ static int dmatest_func(void *data)
305 dma_addr_t dma_srcs[src_cnt]; 307 dma_addr_t dma_srcs[src_cnt];
306 dma_addr_t dma_dsts[dst_cnt]; 308 dma_addr_t dma_dsts[dst_cnt];
307 struct completion cmp; 309 struct completion cmp;
308 unsigned long tmo = msecs_to_jiffies(timeout); 310 unsigned long start, tmo, end = 0 /* compiler... */;
311 bool reload = true;
309 u8 align = 0; 312 u8 align = 0;
310 313
311 total_tests++; 314 total_tests++;
@@ -404,7 +407,17 @@ static int dmatest_func(void *data)
404 } 407 }
405 dma_async_issue_pending(chan); 408 dma_async_issue_pending(chan);
406 409
407 tmo = wait_for_completion_timeout(&cmp, tmo); 410 do {
411 start = jiffies;
412 if (reload)
413 end = start + msecs_to_jiffies(timeout);
414 else if (end <= start)
415 end = start + 1;
416 tmo = wait_for_completion_interruptible_timeout(&cmp,
417 end - start);
418 reload = try_to_freeze();
419 } while (tmo == -ERESTARTSYS);
420
408 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 421 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
409 422
410 if (tmo == 0) { 423 if (tmo == 0) {
@@ -477,6 +490,8 @@ err_srcs:
477 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", 490 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
478 thread_name, total_tests, failed_tests, ret); 491 thread_name, total_tests, failed_tests, ret);
479 492
493 /* terminate all transfers on specified channels */
494 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
480 if (iterations > 0) 495 if (iterations > 0)
481 while (!kthread_should_stop()) { 496 while (!kthread_should_stop()) {
482 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); 497 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
@@ -499,6 +514,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
499 list_del(&thread->node); 514 list_del(&thread->node);
500 kfree(thread); 515 kfree(thread);
501 } 516 }
517
518 /* terminate all transfers on specified channels */
519 dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);
520
502 kfree(dtc); 521 kfree(dtc);
503} 522}
504 523
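
[editor's note] The wait loop added to dmatest_func() is the generic freezable-kthread idiom: the interruptible wait is retried when the freezer wakes the thread with a signal (-ERESTARTSYS), try_to_freeze() parks the thread, and the deadline is recomputed only after an actual freeze so time spent suspended is not charged against the test timeout. A condensed sketch of the same idiom with the deadline handling spelled out; `done` and `timeout_ms` are placeholder names, and the thread must have opted in via set_freezable_with_signal() as above.

	long tmo;
	unsigned long start, end = 0;
	bool reload = true;			/* force a fresh deadline on the first pass */

	do {
		start = jiffies;
		if (reload)			/* first pass, or we just thawed */
			end = start + msecs_to_jiffies(timeout_ms);
		else if (end <= start)		/* deadline expired while not frozen */
			end = start + 1;
		tmo = wait_for_completion_interruptible_timeout(&done, end - start);
		reload = try_to_freeze();	/* true if the thread was actually frozen */
	} while (tmo == -ERESTARTSYS);		/* interrupted by the freezer: wait again */

	if (tmo == 0)
		/* genuine timeout, not a freeze/thaw artifact */;
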
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 7bd7e98548cd..b5cc27dc9a51 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -318,6 +318,7 @@ struct sdma_engine {
318 dma_addr_t context_phys; 318 dma_addr_t context_phys;
319 struct dma_device dma_device; 319 struct dma_device dma_device;
320 struct clk *clk; 320 struct clk *clk;
321 struct mutex channel_0_lock;
321 struct sdma_script_start_addrs *script_addrs; 322 struct sdma_script_start_addrs *script_addrs;
322}; 323};
323 324
@@ -415,11 +416,15 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
415 dma_addr_t buf_phys; 416 dma_addr_t buf_phys;
416 int ret; 417 int ret;
417 418
419 mutex_lock(&sdma->channel_0_lock);
420
418 buf_virt = dma_alloc_coherent(NULL, 421 buf_virt = dma_alloc_coherent(NULL,
419 size, 422 size,
420 &buf_phys, GFP_KERNEL); 423 &buf_phys, GFP_KERNEL);
421 if (!buf_virt) 424 if (!buf_virt) {
422 return -ENOMEM; 425 ret = -ENOMEM;
426 goto err_out;
427 }
423 428
424 bd0->mode.command = C0_SETPM; 429 bd0->mode.command = C0_SETPM;
425 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; 430 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
@@ -433,6 +438,9 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
433 438
434 dma_free_coherent(NULL, size, buf_virt, buf_phys); 439 dma_free_coherent(NULL, size, buf_virt, buf_phys);
435 440
441err_out:
442 mutex_unlock(&sdma->channel_0_lock);
443
436 return ret; 444 return ret;
437} 445}
438 446
@@ -656,6 +664,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
656 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0); 664 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
657 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1); 665 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
658 666
667 mutex_lock(&sdma->channel_0_lock);
668
659 memset(context, 0, sizeof(*context)); 669 memset(context, 0, sizeof(*context));
660 context->channel_state.pc = load_address; 670 context->channel_state.pc = load_address;
661 671
@@ -676,6 +686,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
676 686
677 ret = sdma_run_channel(&sdma->channel[0]); 687 ret = sdma_run_channel(&sdma->channel[0]);
678 688
689 mutex_unlock(&sdma->channel_0_lock);
690
679 return ret; 691 return ret;
680} 692}
681 693
@@ -1131,18 +1143,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
1131 saddr_arr[i] = addr_arr[i]; 1143 saddr_arr[i] = addr_arr[i];
1132} 1144}
1133 1145
1134static int __init sdma_get_firmware(struct sdma_engine *sdma, 1146static void sdma_load_firmware(const struct firmware *fw, void *context)
1135 const char *fw_name)
1136{ 1147{
1137 const struct firmware *fw; 1148 struct sdma_engine *sdma = context;
1138 const struct sdma_firmware_header *header; 1149 const struct sdma_firmware_header *header;
1139 int ret;
1140 const struct sdma_script_start_addrs *addr; 1150 const struct sdma_script_start_addrs *addr;
1141 unsigned short *ram_code; 1151 unsigned short *ram_code;
1142 1152
1143 ret = request_firmware(&fw, fw_name, sdma->dev); 1153 if (!fw) {
1144 if (ret) 1154 dev_err(sdma->dev, "firmware not found\n");
1145 return ret; 1155 return;
1156 }
1146 1157
1147 if (fw->size < sizeof(*header)) 1158 if (fw->size < sizeof(*header))
1148 goto err_firmware; 1159 goto err_firmware;
@@ -1172,6 +1183,16 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma,
1172 1183
1173err_firmware: 1184err_firmware:
1174 release_firmware(fw); 1185 release_firmware(fw);
1186}
1187
1188static int __init sdma_get_firmware(struct sdma_engine *sdma,
1189 const char *fw_name)
1190{
1191 int ret;
1192
1193 ret = request_firmware_nowait(THIS_MODULE,
1194 FW_ACTION_HOTPLUG, fw_name, sdma->dev,
1195 GFP_KERNEL, sdma, sdma_load_firmware);
1175 1196
1176 return ret; 1197 return ret;
1177} 1198}
@@ -1269,11 +1290,14 @@ static int __init sdma_probe(struct platform_device *pdev)
1269 struct sdma_platform_data *pdata = pdev->dev.platform_data; 1290 struct sdma_platform_data *pdata = pdev->dev.platform_data;
1270 int i; 1291 int i;
1271 struct sdma_engine *sdma; 1292 struct sdma_engine *sdma;
1293 s32 *saddr_arr;
1272 1294
1273 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); 1295 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
1274 if (!sdma) 1296 if (!sdma)
1275 return -ENOMEM; 1297 return -ENOMEM;
1276 1298
1299 mutex_init(&sdma->channel_0_lock);
1300
1277 sdma->dev = &pdev->dev; 1301 sdma->dev = &pdev->dev;
1278 1302
1279 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1303 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1310,6 +1334,11 @@ static int __init sdma_probe(struct platform_device *pdev)
1310 goto err_alloc; 1334 goto err_alloc;
1311 } 1335 }
1312 1336
1337 /* initially no scripts available */
1338 saddr_arr = (s32 *)sdma->script_addrs;
1339 for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
1340 saddr_arr[i] = -EINVAL;
1341
1313 if (of_id) 1342 if (of_id)
1314 pdev->id_entry = of_id->data; 1343 pdev->id_entry = of_id->data;
1315 sdma->devtype = pdev->id_entry->driver_data; 1344 sdma->devtype = pdev->id_entry->driver_data;
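
[editor's note] sdma_get_firmware() now returns as soon as the request is queued and the SDMA scripts are patched in from the completion callback, so probe no longer blocks when the firmware lives on a filesystem that is not mounted yet. A generic sketch of the same asynchronous pattern for a hypothetical driver (needs <linux/firmware.h>; "my-firmware.bin" and my_fw_callback are illustrative names):

	static void my_fw_callback(const struct firmware *fw, void *context)
	{
		struct device *dev = context;

		if (!fw) {
			dev_err(dev, "firmware not found\n");
			return;			/* keep running on ROM defaults */
		}
		/* ... validate fw->data / fw->size and program the device ... */
		release_firmware(fw);		/* the callback owns the firmware object */
	}

	/* in probe: does not sleep waiting for userspace */
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				      "my-firmware.bin", dev, GFP_KERNEL,
				      dev, my_fw_callback);
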
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index be641cbd36fc..b4588bdd98bb 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -130,6 +130,23 @@ struct mxs_dma_engine {
130 struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; 130 struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
131}; 131};
132 132
133static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
134{
135 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
136 int chan_id = mxs_chan->chan.chan_id;
137 int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
138
139 /* enable apbh channel clock */
140 if (dma_is_apbh()) {
141 if (apbh_is_old())
142 writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
143 mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
144 else
145 writel(1 << chan_id,
146 mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
147 }
148}
149
133static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) 150static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
134{ 151{
135 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 152 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -148,38 +165,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
148 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 165 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
149 int chan_id = mxs_chan->chan.chan_id; 166 int chan_id = mxs_chan->chan.chan_id;
150 167
168 /* clkgate needs to be enabled before writing other registers */
169 mxs_dma_clkgate(mxs_chan, 1);
170
151 /* set cmd_addr up */ 171 /* set cmd_addr up */
152 writel(mxs_chan->ccw_phys, 172 writel(mxs_chan->ccw_phys,
153 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); 173 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
154 174
155 /* enable apbh channel clock */
156 if (dma_is_apbh()) {
157 if (apbh_is_old())
158 writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
159 mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
160 else
161 writel(1 << chan_id,
162 mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
163 }
164
165 /* write 1 to SEMA to kick off the channel */ 175 /* write 1 to SEMA to kick off the channel */
166 writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id)); 176 writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
167} 177}
168 178
169static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) 179static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
170{ 180{
171 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
172 int chan_id = mxs_chan->chan.chan_id;
173
174 /* disable apbh channel clock */ 181 /* disable apbh channel clock */
175 if (dma_is_apbh()) { 182 mxs_dma_clkgate(mxs_chan, 0);
176 if (apbh_is_old())
177 writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
178 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
179 else
180 writel(1 << chan_id,
181 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
182 }
183 183
184 mxs_chan->status = DMA_SUCCESS; 184 mxs_chan->status = DMA_SUCCESS;
185} 185}
@@ -338,7 +338,10 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
338 if (ret) 338 if (ret)
339 goto err_clk; 339 goto err_clk;
340 340
341 /* clkgate needs to be enabled for reset to finish */
342 mxs_dma_clkgate(mxs_chan, 1);
341 mxs_dma_reset_chan(mxs_chan); 343 mxs_dma_reset_chan(mxs_chan);
344 mxs_dma_clkgate(mxs_chan, 0);
342 345
343 dma_async_tx_descriptor_init(&mxs_chan->desc, chan); 346 dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
344 mxs_chan->desc.tx_submit = mxs_dma_tx_submit; 347 mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
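
[editor's note] The new mxs_dma_clkgate() helper relies on the i.MX23/28 register convention where every control register has write-1-to-set and write-1-to-clear aliases at fixed offsets, so a single bit such as the channel clock gate can be flipped without a read-modify-write. A generic sketch of that idiom; HW_MY_CTRL and BM_MY_CTRL_CLKGATE are illustrative names, the offsets follow the MXS convention used by MXS_SET_ADDR/MXS_CLR_ADDR above.

	#define MXS_SET_ADDR	0x4	/* write-1-to-set alias of the register */
	#define MXS_CLR_ADDR	0x8	/* write-1-to-clear alias of the register */

	void __iomem *base;		/* controller register base */

	/* ungate the clock: clear CLKGATE without touching other bits */
	writel(BM_MY_CTRL_CLKGATE, base + HW_MY_CTRL + MXS_CLR_ADDR);

	/* gate the clock again: set CLKGATE */
	writel(BM_MY_CTRL_CLKGATE, base + HW_MY_CTRL + MXS_SET_ADDR);
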
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 00eee59e8b33..621134fdba4c 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -17,6 +17,8 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/amba/bus.h> 18#include <linux/amba/bus.h>
19#include <linux/amba/pl330.h> 19#include <linux/amba/pl330.h>
20#include <linux/pm_runtime.h>
21#include <linux/scatterlist.h>
20 22
21#define NR_DEFAULT_DESC 16 23#define NR_DEFAULT_DESC 16
22 24
@@ -68,6 +70,14 @@ struct dma_pl330_chan {
68 * NULL if the channel is available to be acquired. 70 * NULL if the channel is available to be acquired.
69 */ 71 */
70 void *pl330_chid; 72 void *pl330_chid;
73
74 /* For D-to-M and M-to-D channels */
75 int burst_sz; /* the peripheral fifo width */
76 int burst_len; /* the number of burst */
77 dma_addr_t fifo_addr;
78
79 /* for cyclic capability */
80 bool cyclic;
71}; 81};
72 82
73struct dma_pl330_dmac { 83struct dma_pl330_dmac {
@@ -83,6 +93,8 @@ struct dma_pl330_dmac {
83 93
84 /* Peripheral channels connected to this DMAC */ 94 /* Peripheral channels connected to this DMAC */
85 struct dma_pl330_chan *peripherals; /* keep at end */ 95 struct dma_pl330_chan *peripherals; /* keep at end */
96
97 struct clk *clk;
86}; 98};
87 99
88struct dma_pl330_desc { 100struct dma_pl330_desc {
@@ -152,6 +164,31 @@ static inline void free_desc_list(struct list_head *list)
152 spin_unlock_irqrestore(&pdmac->pool_lock, flags); 164 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
153} 165}
154 166
167static inline void handle_cyclic_desc_list(struct list_head *list)
168{
169 struct dma_pl330_desc *desc;
170 struct dma_pl330_chan *pch;
171 unsigned long flags;
172
173 if (list_empty(list))
174 return;
175
176 list_for_each_entry(desc, list, node) {
177 dma_async_tx_callback callback;
178
179 /* Change status to reload it */
180 desc->status = PREP;
181 pch = desc->pchan;
182 callback = desc->txd.callback;
183 if (callback)
184 callback(desc->txd.callback_param);
185 }
186
187 spin_lock_irqsave(&pch->lock, flags);
188 list_splice_tail_init(list, &pch->work_list);
189 spin_unlock_irqrestore(&pch->lock, flags);
190}
191
155static inline void fill_queue(struct dma_pl330_chan *pch) 192static inline void fill_queue(struct dma_pl330_chan *pch)
156{ 193{
157 struct dma_pl330_desc *desc; 194 struct dma_pl330_desc *desc;
@@ -205,7 +242,10 @@ static void pl330_tasklet(unsigned long data)
205 242
206 spin_unlock_irqrestore(&pch->lock, flags); 243 spin_unlock_irqrestore(&pch->lock, flags);
207 244
208 free_desc_list(&list); 245 if (pch->cyclic)
246 handle_cyclic_desc_list(&list);
247 else
248 free_desc_list(&list);
209} 249}
210 250
211static void dma_pl330_rqcb(void *token, enum pl330_op_err err) 251static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
@@ -236,6 +276,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
236 spin_lock_irqsave(&pch->lock, flags); 276 spin_lock_irqsave(&pch->lock, flags);
237 277
238 pch->completed = chan->cookie = 1; 278 pch->completed = chan->cookie = 1;
279 pch->cyclic = false;
239 280
240 pch->pl330_chid = pl330_request_channel(&pdmac->pif); 281 pch->pl330_chid = pl330_request_channel(&pdmac->pif);
241 if (!pch->pl330_chid) { 282 if (!pch->pl330_chid) {
@@ -253,25 +294,52 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
253static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) 294static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
254{ 295{
255 struct dma_pl330_chan *pch = to_pchan(chan); 296 struct dma_pl330_chan *pch = to_pchan(chan);
256 struct dma_pl330_desc *desc; 297 struct dma_pl330_desc *desc, *_dt;
257 unsigned long flags; 298 unsigned long flags;
299 struct dma_pl330_dmac *pdmac = pch->dmac;
300 struct dma_slave_config *slave_config;
301 LIST_HEAD(list);
258 302
259 /* Only supports DMA_TERMINATE_ALL */ 303 switch (cmd) {
260 if (cmd != DMA_TERMINATE_ALL) 304 case DMA_TERMINATE_ALL:
261 return -ENXIO; 305 spin_lock_irqsave(&pch->lock, flags);
262
263 spin_lock_irqsave(&pch->lock, flags);
264
265 /* FLUSH the PL330 Channel thread */
266 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
267 306
268 /* Mark all desc done */ 307 /* FLUSH the PL330 Channel thread */
269 list_for_each_entry(desc, &pch->work_list, node) 308 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
270 desc->status = DONE;
271 309
272 spin_unlock_irqrestore(&pch->lock, flags); 310 /* Mark all desc done */
311 list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
312 desc->status = DONE;
313 pch->completed = desc->txd.cookie;
314 list_move_tail(&desc->node, &list);
315 }
273 316
274 pl330_tasklet((unsigned long) pch); 317 list_splice_tail_init(&list, &pdmac->desc_pool);
318 spin_unlock_irqrestore(&pch->lock, flags);
319 break;
320 case DMA_SLAVE_CONFIG:
321 slave_config = (struct dma_slave_config *)arg;
322
323 if (slave_config->direction == DMA_TO_DEVICE) {
324 if (slave_config->dst_addr)
325 pch->fifo_addr = slave_config->dst_addr;
326 if (slave_config->dst_addr_width)
327 pch->burst_sz = __ffs(slave_config->dst_addr_width);
328 if (slave_config->dst_maxburst)
329 pch->burst_len = slave_config->dst_maxburst;
330 } else if (slave_config->direction == DMA_FROM_DEVICE) {
331 if (slave_config->src_addr)
332 pch->fifo_addr = slave_config->src_addr;
333 if (slave_config->src_addr_width)
334 pch->burst_sz = __ffs(slave_config->src_addr_width);
335 if (slave_config->src_maxburst)
336 pch->burst_len = slave_config->src_maxburst;
337 }
338 break;
339 default:
340 dev_err(pch->dmac->pif.dev, "Not supported command.\n");
341 return -ENXIO;
342 }
275 343
276 return 0; 344 return 0;
277} 345}
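
[editor's note] With DMA_SLAVE_CONFIG handled, a slave client no longer needs to pass the FIFO address and burst parameters through chan->private; it describes the peripheral with a standard dma_slave_config before preparing transfers, and pl330 snapshots fifo_addr, burst_sz and burst_len from it. A client-side sketch for a memory-to-peripheral channel; the FIFO address and widths are placeholders.

	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr	= periph_fifo_phys,		/* peripheral FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	/* mapped to burst_sz via __ffs() */
		.dst_maxburst	= 1,				/* mapped to burst_len */
	};

	ret = dmaengine_slave_config(chan, &cfg);	/* must precede any prep_* call */
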
@@ -288,6 +356,9 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
288 pl330_release_channel(pch->pl330_chid); 356 pl330_release_channel(pch->pl330_chid);
289 pch->pl330_chid = NULL; 357 pch->pl330_chid = NULL;
290 358
359 if (pch->cyclic)
360 list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
361
291 spin_unlock_irqrestore(&pch->lock, flags); 362 spin_unlock_irqrestore(&pch->lock, flags);
292} 363}
293 364
@@ -453,7 +524,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
453 524
454 if (peri) { 525 if (peri) {
455 desc->req.rqtype = peri->rqtype; 526 desc->req.rqtype = peri->rqtype;
456 desc->req.peri = peri->peri_id; 527 desc->req.peri = pch->chan.chan_id;
457 } else { 528 } else {
458 desc->req.rqtype = MEMTOMEM; 529 desc->req.rqtype = MEMTOMEM;
459 desc->req.peri = 0; 530 desc->req.peri = 0;
@@ -524,6 +595,51 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
524 return burst_len; 595 return burst_len;
525} 596}
526 597
598static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
599 struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
600 size_t period_len, enum dma_data_direction direction)
601{
602 struct dma_pl330_desc *desc;
603 struct dma_pl330_chan *pch = to_pchan(chan);
604 dma_addr_t dst;
605 dma_addr_t src;
606
607 desc = pl330_get_desc(pch);
608 if (!desc) {
609 dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
610 __func__, __LINE__);
611 return NULL;
612 }
613
614 switch (direction) {
615 case DMA_TO_DEVICE:
616 desc->rqcfg.src_inc = 1;
617 desc->rqcfg.dst_inc = 0;
618 src = dma_addr;
619 dst = pch->fifo_addr;
620 break;
621 case DMA_FROM_DEVICE:
622 desc->rqcfg.src_inc = 0;
623 desc->rqcfg.dst_inc = 1;
624 src = pch->fifo_addr;
625 dst = dma_addr;
626 break;
627 default:
628 dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
629 __func__, __LINE__);
630 return NULL;
631 }
632
633 desc->rqcfg.brst_size = pch->burst_sz;
634 desc->rqcfg.brst_len = 1;
635
636 pch->cyclic = true;
637
638 fill_px(&desc->px, dst, src, period_len);
639
640 return &desc->txd;
641}
642
527static struct dma_async_tx_descriptor * 643static struct dma_async_tx_descriptor *
528pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, 644pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
529 dma_addr_t src, size_t len, unsigned long flags) 645 dma_addr_t src, size_t len, unsigned long flags)
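
[editor's note] pl330_prep_dma_cyclic() covers the usual audio/UART case: each completed descriptor is marked PREP again by the tasklet and spliced back onto the work list, with the callback run once per period. A rough client-side sketch of driving it, assuming the channel was configured with DMA_TO_DEVICE as above; the prep op is reached through the dma_device since a convenience wrapper may not exist on this kernel, and buf_phys/buf_len/period_len/my_period_cb are illustrative.

	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = chan->device->device_prep_dma_cyclic(chan, buf_phys, buf_len,
						    period_len, DMA_TO_DEVICE);
	if (!desc)
		return -EBUSY;

	desc->callback = my_period_cb;		/* invoked once per completed period */
	desc->callback_param = my_ctx;

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);		/* kick off the first period */
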
@@ -579,7 +695,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
579 struct dma_pl330_peri *peri = chan->private; 695 struct dma_pl330_peri *peri = chan->private;
580 struct scatterlist *sg; 696 struct scatterlist *sg;
581 unsigned long flags; 697 unsigned long flags;
582 int i, burst_size; 698 int i;
583 dma_addr_t addr; 699 dma_addr_t addr;
584 700
585 if (unlikely(!pch || !sgl || !sg_len || !peri)) 701 if (unlikely(!pch || !sgl || !sg_len || !peri))
@@ -595,8 +711,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
595 return NULL; 711 return NULL;
596 } 712 }
597 713
598 addr = peri->fifo_addr; 714 addr = pch->fifo_addr;
599 burst_size = peri->burst_sz;
600 715
601 first = NULL; 716 first = NULL;
602 717
@@ -644,7 +759,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
644 sg_dma_address(sg), addr, sg_dma_len(sg)); 759 sg_dma_address(sg), addr, sg_dma_len(sg));
645 } 760 }
646 761
647 desc->rqcfg.brst_size = burst_size; 762 desc->rqcfg.brst_size = pch->burst_sz;
648 desc->rqcfg.brst_len = 1; 763 desc->rqcfg.brst_len = 1;
649 } 764 }
650 765
@@ -696,6 +811,30 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
696 goto probe_err1; 811 goto probe_err1;
697 } 812 }
698 813
814 pdmac->clk = clk_get(&adev->dev, "dma");
815 if (IS_ERR(pdmac->clk)) {
816 dev_err(&adev->dev, "Cannot get operation clock.\n");
817 ret = -EINVAL;
818 goto probe_err1;
819 }
820
821 amba_set_drvdata(adev, pdmac);
822
823#ifdef CONFIG_PM_RUNTIME
824 /* to use the runtime PM helper functions */
825 pm_runtime_enable(&adev->dev);
826
827 /* enable the power domain */
828 if (pm_runtime_get_sync(&adev->dev)) {
829 dev_err(&adev->dev, "failed to get runtime pm\n");
830 ret = -ENODEV;
831 goto probe_err1;
832 }
833#else
834 /* enable dma clk */
835 clk_enable(pdmac->clk);
836#endif
837
699 irq = adev->irq[0]; 838 irq = adev->irq[0];
700 ret = request_irq(irq, pl330_irq_handler, 0, 839 ret = request_irq(irq, pl330_irq_handler, 0,
701 dev_name(&adev->dev), pi); 840 dev_name(&adev->dev), pi);
@@ -732,6 +871,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
732 case MEMTODEV: 871 case MEMTODEV:
733 case DEVTOMEM: 872 case DEVTOMEM:
734 dma_cap_set(DMA_SLAVE, pd->cap_mask); 873 dma_cap_set(DMA_SLAVE, pd->cap_mask);
874 dma_cap_set(DMA_CYCLIC, pd->cap_mask);
735 break; 875 break;
736 default: 876 default:
737 dev_err(&adev->dev, "DEVTODEV Not Supported\n"); 877 dev_err(&adev->dev, "DEVTODEV Not Supported\n");
@@ -760,6 +900,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
760 pd->device_alloc_chan_resources = pl330_alloc_chan_resources; 900 pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
761 pd->device_free_chan_resources = pl330_free_chan_resources; 901 pd->device_free_chan_resources = pl330_free_chan_resources;
762 pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy; 902 pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
903 pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
763 pd->device_tx_status = pl330_tx_status; 904 pd->device_tx_status = pl330_tx_status;
764 pd->device_prep_slave_sg = pl330_prep_slave_sg; 905 pd->device_prep_slave_sg = pl330_prep_slave_sg;
765 pd->device_control = pl330_control; 906 pd->device_control = pl330_control;
@@ -771,8 +912,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
771 goto probe_err4; 912 goto probe_err4;
772 } 913 }
773 914
774 amba_set_drvdata(adev, pdmac);
775
776 dev_info(&adev->dev, 915 dev_info(&adev->dev,
777 "Loaded driver for PL330 DMAC-%d\n", adev->periphid); 916 "Loaded driver for PL330 DMAC-%d\n", adev->periphid);
778 dev_info(&adev->dev, 917 dev_info(&adev->dev,
@@ -833,6 +972,13 @@ static int __devexit pl330_remove(struct amba_device *adev)
833 res = &adev->res; 972 res = &adev->res;
834 release_mem_region(res->start, resource_size(res)); 973 release_mem_region(res->start, resource_size(res));
835 974
975#ifdef CONFIG_PM_RUNTIME
976 pm_runtime_put(&adev->dev);
977 pm_runtime_disable(&adev->dev);
978#else
979 clk_disable(pdmac->clk);
980#endif
981
836 kfree(pdmac); 982 kfree(pdmac);
837 983
838 return 0; 984 return 0;
@@ -846,10 +992,49 @@ static struct amba_id pl330_ids[] = {
846 { 0, 0 }, 992 { 0, 0 },
847}; 993};
848 994
995#ifdef CONFIG_PM_RUNTIME
996static int pl330_runtime_suspend(struct device *dev)
997{
998 struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
999
1000 if (!pdmac) {
1001 dev_err(dev, "failed to get dmac\n");
1002 return -ENODEV;
1003 }
1004
1005 clk_disable(pdmac->clk);
1006
1007 return 0;
1008}
1009
1010static int pl330_runtime_resume(struct device *dev)
1011{
1012 struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
1013
1014 if (!pdmac) {
1015 dev_err(dev, "failed to get dmac\n");
1016 return -ENODEV;
1017 }
1018
1019 clk_enable(pdmac->clk);
1020
1021 return 0;
1022}
1023#else
1024#define pl330_runtime_suspend NULL
1025#define pl330_runtime_resume NULL
1026#endif /* CONFIG_PM_RUNTIME */
1027
1028static const struct dev_pm_ops pl330_pm_ops = {
1029 .runtime_suspend = pl330_runtime_suspend,
1030 .runtime_resume = pl330_runtime_resume,
1031};
1032
849static struct amba_driver pl330_driver = { 1033static struct amba_driver pl330_driver = {
850 .drv = { 1034 .drv = {
851 .owner = THIS_MODULE, 1035 .owner = THIS_MODULE,
852 .name = "dma-pl330", 1036 .name = "dma-pl330",
1037 .pm = &pl330_pm_ops,
853 }, 1038 },
854 .id_table = pl330_ids, 1039 .id_table = pl330_ids,
855 .probe = pl330_probe, 1040 .probe = pl330_probe,
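
[editor's note] The runtime-PM callbacks above only gate the operation clock; something still has to take and drop runtime-PM references around the periods when the DMAC must stay clocked. One plausible consumer (hypothetical, not part of this patch) is the channel lifetime, so the clock only runs while at least one channel is allocated:

	/* in pl330_alloc_chan_resources(), before touching the hardware */
	pm_runtime_get_sync(pch->dmac->pif.dev);

	/* in pl330_free_chan_resources(), once the channel thread is released */
	pm_runtime_put(pch->dmac->pif.dev);
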
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index a04f87d7ee3d..03cfdab99c8f 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -913,9 +913,9 @@ request_done:
913} 913}
914 914
915static void s3cmci_dma_setup(struct s3cmci_host *host, 915static void s3cmci_dma_setup(struct s3cmci_host *host,
916 enum s3c2410_dmasrc source) 916 enum dma_data_direction source)
917{ 917{
918 static enum s3c2410_dmasrc last_source = -1; 918 static enum dma_data_direction last_source = -1;
919 static int setup_ok; 919 static int setup_ok;
920 920
921 if (last_source == source) 921 if (last_source == source)
@@ -1087,7 +1087,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
1087 1087
1088 BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); 1088 BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
1089 1089
1090 s3cmci_dma_setup(host, rw ? S3C2410_DMASRC_MEM : S3C2410_DMASRC_HW); 1090 s3cmci_dma_setup(host, rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1091 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); 1091 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
1092 1092
1093 dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 1093 dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 595dacc7645f..019a7163572f 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -131,6 +131,12 @@
131#define RXBUSY (1<<2) 131#define RXBUSY (1<<2)
132#define TXBUSY (1<<3) 132#define TXBUSY (1<<3)
133 133
134struct s3c64xx_spi_dma_data {
135 unsigned ch;
136 enum dma_data_direction direction;
137 enum dma_ch dmach;
138};
139
134/** 140/**
135 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver. 141 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
136 * @clk: Pointer to the spi clock. 142 * @clk: Pointer to the spi clock.
@@ -164,13 +170,14 @@ struct s3c64xx_spi_driver_data {
164 struct work_struct work; 170 struct work_struct work;
165 struct list_head queue; 171 struct list_head queue;
166 spinlock_t lock; 172 spinlock_t lock;
167 enum dma_ch rx_dmach;
168 enum dma_ch tx_dmach;
169 unsigned long sfr_start; 173 unsigned long sfr_start;
170 struct completion xfer_completion; 174 struct completion xfer_completion;
171 unsigned state; 175 unsigned state;
172 unsigned cur_mode, cur_bpw; 176 unsigned cur_mode, cur_bpw;
173 unsigned cur_speed; 177 unsigned cur_speed;
178 struct s3c64xx_spi_dma_data rx_dma;
179 struct s3c64xx_spi_dma_data tx_dma;
180 struct samsung_dma_ops *ops;
174}; 181};
175 182
176static struct s3c2410_dma_client s3c64xx_spi_dma_client = { 183static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
@@ -226,6 +233,78 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
226 writel(val, regs + S3C64XX_SPI_CH_CFG); 233 writel(val, regs + S3C64XX_SPI_CH_CFG);
227} 234}
228 235
236static void s3c64xx_spi_dmacb(void *data)
237{
238 struct s3c64xx_spi_driver_data *sdd;
239 struct s3c64xx_spi_dma_data *dma = data;
240 unsigned long flags;
241
242 if (dma->direction == DMA_FROM_DEVICE)
243 sdd = container_of(data,
244 struct s3c64xx_spi_driver_data, rx_dma);
245 else
246 sdd = container_of(data,
247 struct s3c64xx_spi_driver_data, tx_dma);
248
249 spin_lock_irqsave(&sdd->lock, flags);
250
251 if (dma->direction == DMA_FROM_DEVICE) {
252 sdd->state &= ~RXBUSY;
253 if (!(sdd->state & TXBUSY))
254 complete(&sdd->xfer_completion);
255 } else {
256 sdd->state &= ~TXBUSY;
257 if (!(sdd->state & RXBUSY))
258 complete(&sdd->xfer_completion);
259 }
260
261 spin_unlock_irqrestore(&sdd->lock, flags);
262}
263
264static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
265 unsigned len, dma_addr_t buf)
266{
267 struct s3c64xx_spi_driver_data *sdd;
268 struct samsung_dma_prep_info info;
269
270 if (dma->direction == DMA_FROM_DEVICE)
271 sdd = container_of((void *)dma,
272 struct s3c64xx_spi_driver_data, rx_dma);
273 else
274 sdd = container_of((void *)dma,
275 struct s3c64xx_spi_driver_data, tx_dma);
276
277 info.cap = DMA_SLAVE;
278 info.len = len;
279 info.fp = s3c64xx_spi_dmacb;
280 info.fp_param = dma;
281 info.direction = dma->direction;
282 info.buf = buf;
283
284 sdd->ops->prepare(dma->ch, &info);
285 sdd->ops->trigger(dma->ch);
286}
287
288static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
289{
290 struct samsung_dma_info info;
291
292 sdd->ops = samsung_dma_get_ops();
293
294 info.cap = DMA_SLAVE;
295 info.client = &s3c64xx_spi_dma_client;
296 info.width = sdd->cur_bpw / 8;
297
298 info.direction = sdd->rx_dma.direction;
299 info.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
300 sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &info);
301 info.direction = sdd->tx_dma.direction;
302 info.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
303 sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &info);
304
305 return 1;
306}
307
229static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, 308static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
230 struct spi_device *spi, 309 struct spi_device *spi,
231 struct spi_transfer *xfer, int dma_mode) 310 struct spi_transfer *xfer, int dma_mode)
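
[editor's note] acquire_dma()/prepare_dma() now go through the samsung_dma_ops table returned by samsung_dma_get_ops(), which hides whether the platform routes SPI DMA through the legacy s3c2410 API or the generic dmaengine/PL330 backend. The real definition lives in the Samsung platform dma-ops header; reconstructed from the calls made in this driver, it is assumed to look roughly like the following (an approximation for illustration, not the authoritative declaration):

	struct samsung_dma_ops {
		unsigned (*request)(enum dma_ch ch, struct samsung_dma_info *info);
		int (*release)(unsigned ch, struct s3c2410_dma_client *client);
		int (*prepare)(unsigned ch, struct samsung_dma_prep_info *info);
		int (*trigger)(unsigned ch);
		int (*stop)(unsigned ch);
		/* the real table may carry further ops (flush, started, ...) */
	};
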
@@ -258,10 +337,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
258 chcfg |= S3C64XX_SPI_CH_TXCH_ON; 337 chcfg |= S3C64XX_SPI_CH_TXCH_ON;
259 if (dma_mode) { 338 if (dma_mode) {
260 modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; 339 modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
261 s3c2410_dma_config(sdd->tx_dmach, sdd->cur_bpw / 8); 340 prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
262 s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd,
263 xfer->tx_dma, xfer->len);
264 s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START);
265 } else { 341 } else {
266 switch (sdd->cur_bpw) { 342 switch (sdd->cur_bpw) {
267 case 32: 343 case 32:
@@ -293,10 +369,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
293 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) 369 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
294 | S3C64XX_SPI_PACKET_CNT_EN, 370 | S3C64XX_SPI_PACKET_CNT_EN,
295 regs + S3C64XX_SPI_PACKET_CNT); 371 regs + S3C64XX_SPI_PACKET_CNT);
296 s3c2410_dma_config(sdd->rx_dmach, sdd->cur_bpw / 8); 372 prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
297 s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd,
298 xfer->rx_dma, xfer->len);
299 s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START);
300 } 373 }
301 } 374 }
302 375
@@ -482,46 +555,6 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
482 } 555 }
483} 556}
484 557
485static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
486 int size, enum s3c2410_dma_buffresult res)
487{
488 struct s3c64xx_spi_driver_data *sdd = buf_id;
489 unsigned long flags;
490
491 spin_lock_irqsave(&sdd->lock, flags);
492
493 if (res == S3C2410_RES_OK)
494 sdd->state &= ~RXBUSY;
495 else
496 dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size);
497
498 /* If the other done */
499 if (!(sdd->state & TXBUSY))
500 complete(&sdd->xfer_completion);
501
502 spin_unlock_irqrestore(&sdd->lock, flags);
503}
504
505static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
506 int size, enum s3c2410_dma_buffresult res)
507{
508 struct s3c64xx_spi_driver_data *sdd = buf_id;
509 unsigned long flags;
510
511 spin_lock_irqsave(&sdd->lock, flags);
512
513 if (res == S3C2410_RES_OK)
514 sdd->state &= ~TXBUSY;
515 else
516 dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d \n", size);
517
518 /* If the other done */
519 if (!(sdd->state & RXBUSY))
520 complete(&sdd->xfer_completion);
521
522 spin_unlock_irqrestore(&sdd->lock, flags);
523}
524
525#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32) 558#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
526 559
527static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, 560static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
@@ -696,12 +729,10 @@ static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
696 if (use_dma) { 729 if (use_dma) {
697 if (xfer->tx_buf != NULL 730 if (xfer->tx_buf != NULL
698 && (sdd->state & TXBUSY)) 731 && (sdd->state & TXBUSY))
699 s3c2410_dma_ctrl(sdd->tx_dmach, 732 sdd->ops->stop(sdd->tx_dma.ch);
700 S3C2410_DMAOP_FLUSH);
701 if (xfer->rx_buf != NULL 733 if (xfer->rx_buf != NULL
702 && (sdd->state & RXBUSY)) 734 && (sdd->state & RXBUSY))
703 s3c2410_dma_ctrl(sdd->rx_dmach, 735 sdd->ops->stop(sdd->rx_dma.ch);
704 S3C2410_DMAOP_FLUSH);
705 } 736 }
706 737
707 goto out; 738 goto out;
@@ -739,30 +770,6 @@ out:
739 msg->complete(msg->context); 770 msg->complete(msg->context);
740} 771}
741 772
742static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
743{
744 if (s3c2410_dma_request(sdd->rx_dmach,
745 &s3c64xx_spi_dma_client, NULL) < 0) {
746 dev_err(&sdd->pdev->dev, "cannot get RxDMA\n");
747 return 0;
748 }
749 s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb);
750 s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW,
751 sdd->sfr_start + S3C64XX_SPI_RX_DATA);
752
753 if (s3c2410_dma_request(sdd->tx_dmach,
754 &s3c64xx_spi_dma_client, NULL) < 0) {
755 dev_err(&sdd->pdev->dev, "cannot get TxDMA\n");
756 s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
757 return 0;
758 }
759 s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb);
760 s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM,
761 sdd->sfr_start + S3C64XX_SPI_TX_DATA);
762
763 return 1;
764}
765
766static void s3c64xx_spi_work(struct work_struct *work) 773static void s3c64xx_spi_work(struct work_struct *work)
767{ 774{
768 struct s3c64xx_spi_driver_data *sdd = container_of(work, 775 struct s3c64xx_spi_driver_data *sdd = container_of(work,
@@ -799,8 +806,8 @@ static void s3c64xx_spi_work(struct work_struct *work)
799 spin_unlock_irqrestore(&sdd->lock, flags); 806 spin_unlock_irqrestore(&sdd->lock, flags);
800 807
801 /* Free DMA channels */ 808 /* Free DMA channels */
802 s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client); 809 sdd->ops->release(sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
803 s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client); 810 sdd->ops->release(sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
804} 811}
805 812
806static int s3c64xx_spi_transfer(struct spi_device *spi, 813static int s3c64xx_spi_transfer(struct spi_device *spi,
@@ -1017,8 +1024,10 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
1017 sdd->cntrlr_info = sci; 1024 sdd->cntrlr_info = sci;
1018 sdd->pdev = pdev; 1025 sdd->pdev = pdev;
1019 sdd->sfr_start = mem_res->start; 1026 sdd->sfr_start = mem_res->start;
1020 sdd->tx_dmach = dmatx_res->start; 1027 sdd->tx_dma.dmach = dmatx_res->start;
1021 sdd->rx_dmach = dmarx_res->start; 1028 sdd->tx_dma.direction = DMA_TO_DEVICE;
1029 sdd->rx_dma.dmach = dmarx_res->start;
1030 sdd->rx_dma.direction = DMA_FROM_DEVICE;
1022 1031
1023 sdd->cur_bpw = 8; 1032 sdd->cur_bpw = 8;
1024 1033
@@ -1106,7 +1115,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
1106 pdev->id, master->num_chipselect); 1115 pdev->id, master->num_chipselect);
1107 dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n", 1116 dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
1108 mem_res->end, mem_res->start, 1117 mem_res->end, mem_res->start,
1109 sdd->rx_dmach, sdd->tx_dmach); 1118 sdd->rx_dma.dmach, sdd->tx_dma.dmach);
1110 1119
1111 return 0; 1120 return 0;
1112 1121