Diffstat (limited to 'drivers/dma/amba-pl08x.c')
-rw-r--r--	drivers/dma/amba-pl08x.c | 640
1 file changed, 316 insertions(+), 324 deletions(-)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index be21e3f138a8..b7cbd1ab1db1 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -66,32 +66,29 @@
  * after the final transfer signalled by LBREQ or LSREQ. The DMAC
  * will then move to the next LLI entry.
  *
- * Only the former works sanely with scatter lists, so we only implement
- * the DMAC flow control method. However, peripherals which use the LBREQ
- * and LSREQ signals (eg, MMCI) are unable to use this mode, which through
- * these hardware restrictions prevents them from using scatter DMA.
- *
  * Global TODO:
  * - Break out common code from arch/arm/mach-s3c64xx and share
  */
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/dmaengine.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/pl08x.h>
 #include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
-
+#include <linux/slab.h>
 #include <asm/hardware/pl080.h>

 #define DRIVER_NAME	"pl08xdmac"

+static struct amba_driver pl08x_amba_driver;
+
 /**
  * struct vendor_data - vendor-specific config parameters for PL08x derivatives
  * @channels: the number of channels available in this variant
@@ -126,7 +123,8 @@ struct pl08x_lli {
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
- * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches
+ * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
+ *	fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lock: a spinlock for this struct
 */
@@ -149,14 +147,6 @@ struct pl08x_driver_data {
 * PL08X specific defines
 */

-/*
- * Memory boundaries: the manual for PL08x says that the controller
- * cannot read past a 1KiB boundary, so these defines are used to
- * create transfer LLIs that do not cross such boundaries.
- */
-#define PL08X_BOUNDARY_SHIFT		(10)	/* 1KB 0x400 */
-#define PL08X_BOUNDARY_SIZE		(1 << PL08X_BOUNDARY_SHIFT)
-
 /* Size (bytes) of each LLI buffer allocated for one transfer */
 # define PL08X_LLI_TSFR_SIZE	0x2000

@@ -272,7 +262,6 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
 	writel(val, ch->base + PL080_CH_CONFIG);
 }

-
 /*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status. This should not be used for
@@ -363,7 +352,9 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 	if (!list_empty(&plchan->pend_list)) {
 		struct pl08x_txd *txdi;
 		list_for_each_entry(txdi, &plchan->pend_list, node) {
-			bytes += txdi->len;
+			struct pl08x_sg *dsg;
+			list_for_each_entry(dsg, &txdi->dsg_list, node)
+				bytes += dsg->len;
 		}
 	}

@@ -407,6 +398,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
 		return NULL;
 	}

+	pm_runtime_get_sync(&pl08x->adev->dev);
 	return ch;
 }

@@ -420,6 +412,8 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
 	/* Stop the channel and clear its interrupts */
 	pl08x_terminate_phy_chan(pl08x, ch);

+	pm_runtime_put(&pl08x->adev->dev);
+
 	/* Mark it as free */
 	ch->serving = NULL;
 	spin_unlock_irqrestore(&ch->lock, flags);
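The two hunks above bracket physical-channel use with runtime PM: pl08x_get_phy_channel() takes a reference with pm_runtime_get_sync() only on its success path, and pl08x_put_phy_channel() drops it, so the controller is held active exactly while at least one physical channel is claimed. A rough userspace sketch of that refcount discipline (plain C; pm_get()/pm_put() are stand-ins for the kernel's runtime-PM calls, not the real API):

    #include <assert.h>
    #include <stdio.h>

    static int pm_refcount;   /* stands in for the device's runtime-PM usage count */

    static void pm_get(void) { if (pm_refcount++ == 0) printf("controller powered up\n"); }
    static void pm_put(void) { assert(pm_refcount > 0); if (--pm_refcount == 0) printf("controller powered down\n"); }

    struct phy_chan { int busy; };

    /* mirrors pl08x_get_phy_channel(): take a PM reference only on success */
    static struct phy_chan *get_phy_channel(struct phy_chan *ch)
    {
        if (ch->busy)
            return NULL;   /* no reference taken on the failure path */
        ch->busy = 1;
        pm_get();
        return ch;
    }

    static void put_phy_channel(struct phy_chan *ch)
    {
        pm_put();
        ch->busy = 0;      /* mark it as free, as the driver does */
    }

    int main(void)
    {
        struct phy_chan ch = { 0 };
        struct phy_chan *got = get_phy_channel(&ch);
        if (got)
            put_phy_channel(got);
        return 0;
    }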
@@ -499,36 +493,30 @@ struct pl08x_lli_build_data {
 };

 /*
- * Autoselect a master bus to use for the transfer this prefers the
- * destination bus if both available if fixed address on one bus the
- * other will be chosen
+ * Autoselect a master bus to use for the transfer. The slave will be chosen
+ * as the victim in case src & dst are not similarly aligned, i.e. if after
+ * aligning the master's address to the width requirements of the transfer
+ * (by sending a few bytes byte by byte), the slave is still not aligned,
+ * then its width will be reduced to BYTE.
+ * - prefers the destination bus if both available
+ * - prefers the bus with a fixed address (i.e. the peripheral)
 */
 static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
 	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
 {
 	if (!(cctl & PL080_CONTROL_DST_INCR)) {
-		*mbus = &bd->srcbus;
-		*sbus = &bd->dstbus;
-	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
 		*mbus = &bd->dstbus;
 		*sbus = &bd->srcbus;
+	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
+		*mbus = &bd->srcbus;
+		*sbus = &bd->dstbus;
 	} else {
-		if (bd->dstbus.buswidth == 4) {
+		if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
 			*mbus = &bd->dstbus;
 			*sbus = &bd->srcbus;
-		} else if (bd->srcbus.buswidth == 4) {
-			*mbus = &bd->srcbus;
-			*sbus = &bd->dstbus;
-		} else if (bd->dstbus.buswidth == 2) {
-			*mbus = &bd->dstbus;
-			*sbus = &bd->srcbus;
-		} else if (bd->srcbus.buswidth == 2) {
+		} else {
 			*mbus = &bd->srcbus;
 			*sbus = &bd->dstbus;
-		} else {
-			/* bd->srcbus.buswidth == 1 */
-			*mbus = &bd->dstbus;
-			*sbus = &bd->srcbus;
 		}
 	}
 }
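The rewritten pl08x_choose_master_bus() collapses the old width-by-width ladder into one rule: a non-incrementing (fixed-address, i.e. peripheral) bus always becomes the master, and when both buses increment, the wider one wins, with the destination preferred on ties. A standalone sketch of just that decision (plain C with stand-in structs, not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    /* stand-ins for the driver's bus descriptors: only what the rule needs */
    struct bus { const char *name; unsigned buswidth; bool incrementing; };

    /* same decision as the new pl08x_choose_master_bus() */
    static void choose_master(struct bus *src, struct bus *dst,
                              struct bus **mbus, struct bus **sbus)
    {
        if (!dst->incrementing) {          /* fixed destination: peripheral wins */
            *mbus = dst; *sbus = src;
        } else if (!src->incrementing) {   /* fixed source: peripheral wins */
            *mbus = src; *sbus = dst;
        } else if (dst->buswidth >= src->buswidth) {
            *mbus = dst; *sbus = src;      /* wider bus, destination on ties */
        } else {
            *mbus = src; *sbus = dst;
        }
    }

    int main(void)
    {
        struct bus src = { "src", 2, true }, dst = { "dst", 4, true };
        struct bus *m, *s;
        choose_master(&src, &dst, &m, &s);
        printf("master=%s slave=%s\n", m->name, s->name); /* master=dst */
        return 0;
    }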
@@ -547,7 +535,8 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
 	llis_va[num_llis].cctl = cctl;
 	llis_va[num_llis].src = bd->srcbus.addr;
 	llis_va[num_llis].dst = bd->dstbus.addr;
-	llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
+	llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
+		sizeof(struct pl08x_lli);
 	llis_va[num_llis].lli |= bd->lli_bus;

 	if (cctl & PL080_CONTROL_SRC_INCR)
@@ -560,16 +549,12 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
 	bd->remainder -= len;
 }

-/*
- * Return number of bytes to fill to boundary, or len.
- * This calculation works for any value of addr.
- */
-static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
+static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
+	u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
 {
-	size_t boundary_len = PL08X_BOUNDARY_SIZE -
-		(addr & (PL08X_BOUNDARY_SIZE - 1));
-
-	return min(boundary_len, len);
+	*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
+	pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
+	(*total_bytes) += len;
 }

 /*
@@ -583,13 +568,12 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 	struct pl08x_bus_data *mbus, *sbus;
 	struct pl08x_lli_build_data bd;
 	int num_llis = 0;
-	u32 cctl;
-	size_t max_bytes_per_lli;
-	size_t total_bytes = 0;
+	u32 cctl, early_bytes = 0;
+	size_t max_bytes_per_lli, total_bytes;
 	struct pl08x_lli *llis_va;
+	struct pl08x_sg *dsg;

-	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
-			&txd->llis_bus);
+	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
 	if (!txd->llis_va) {
 		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
 		return 0;
@@ -597,13 +581,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,

 	pl08x->pool_ctr++;

-	/* Get the default CCTL */
-	cctl = txd->cctl;
-
 	bd.txd = txd;
-	bd.srcbus.addr = txd->src_addr;
-	bd.dstbus.addr = txd->dst_addr;
 	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
+	cctl = txd->cctl;

 	/* Find maximum width of the source bus */
 	bd.srcbus.maxwidth =
@@ -615,215 +595,179 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
 			PL080_CONTROL_DWIDTH_SHIFT);

-	/* Set up the bus widths to the maximum */
-	bd.srcbus.buswidth = bd.srcbus.maxwidth;
-	bd.dstbus.buswidth = bd.dstbus.maxwidth;
-
-	/*
-	 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
-	 */
-	max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
-		PL080_CONTROL_TRANSFER_SIZE_MASK;
-
-	/* We need to count this down to zero */
-	bd.remainder = txd->len;
-
-	/*
-	 * Choose bus to align to
-	 * - prefers destination bus if both available
-	 * - if fixed address on one bus chooses other
-	 */
-	pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
-
-	dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n",
-		bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
-		bd.srcbus.buswidth,
-		bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
-		bd.dstbus.buswidth,
-		bd.remainder, max_bytes_per_lli);
-	dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
-		mbus == &bd.srcbus ? "src" : "dst",
-		sbus == &bd.srcbus ? "src" : "dst");
-
-	if (txd->len < mbus->buswidth) {
-		/* Less than a bus width available - send as single bytes */
-		while (bd.remainder) {
-			dev_vdbg(&pl08x->adev->dev,
-				"%s single byte LLIs for a transfer of "
-				"less than a bus width (remain 0x%08x)\n",
-				__func__, bd.remainder);
-			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
-			total_bytes++;
-		}
-	} else {
-		/* Make one byte LLIs until master bus is aligned */
-		while ((mbus->addr) % (mbus->buswidth)) {
-			dev_vdbg(&pl08x->adev->dev,
-				"%s adjustment lli for less than bus width "
-				"(remain 0x%08x)\n",
-				__func__, bd.remainder);
-			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
-			total_bytes++;
-		}
+	list_for_each_entry(dsg, &txd->dsg_list, node) {
+		total_bytes = 0;
+		cctl = txd->cctl;
+
+		bd.srcbus.addr = dsg->src_addr;
+		bd.dstbus.addr = dsg->dst_addr;
+		bd.remainder = dsg->len;
+		bd.srcbus.buswidth = bd.srcbus.maxwidth;
+		bd.dstbus.buswidth = bd.dstbus.maxwidth;
+
+		pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
+
+		dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
+			bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
+			bd.srcbus.buswidth,
+			bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
+			bd.dstbus.buswidth,
+			bd.remainder);
+		dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
+			mbus == &bd.srcbus ? "src" : "dst",
+			sbus == &bd.srcbus ? "src" : "dst");

 		/*
-		 * Master now aligned
-		 * - if slave is not then we must set its width down
+		 * Zero length is only allowed if all these requirements are
+		 * met:
+		 * - the flow controller is the peripheral.
+		 * - src.addr is aligned to src.width
+		 * - dst.addr is aligned to dst.width
+		 *
+		 * sg_len == 1 should be true, as there can be two cases here:
+		 *
+		 * - Memory addresses are contiguous and are not scattered.
+		 *   Here, only one sg will be passed by the user driver, with a
+		 *   memory address and zero length. We pass this to the controller
+		 *   and after the transfer it will receive the last burst
+		 *   request from the peripheral and so the transfer finishes.
+		 *
+		 * - Memory addresses are scattered and are not contiguous.
+		 *   Here, as the DMA controller obviously doesn't know when an LLI's
+		 *   transfer is over, it can't load the next LLI. So in this
+		 *   case, there has to be an assumption that only one LLI is
+		 *   supported. Thus, we can't have scattered addresses.
 		 */
-		if (sbus->addr % sbus->buswidth) {
-			dev_dbg(&pl08x->adev->dev,
-				"%s set down bus width to one byte\n",
-				__func__);
+		if (!bd.remainder) {
+			u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
+				PL080_CONFIG_FLOW_CONTROL_SHIFT;
+			if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
+					(fc <= PL080_FLOW_SRC2DST_SRC))) {
+				dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
+					__func__);
+				return 0;
+			}
+
+			if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
+					(bd.dstbus.addr % bd.dstbus.buswidth)) {
+				dev_err(&pl08x->adev->dev,
+					"%s src & dst address must be aligned to src"
+					" & dst width if peripheral is flow controller",
+					__func__);
+				return 0;
+			}

-			sbus->buswidth = 1;
+			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
+					bd.dstbus.buswidth, 0);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
+			break;
 		}

 		/*
-		 * Make largest possible LLIs until less than one bus
-		 * width left
+		 * Send byte by byte for the following cases
+		 * - Less than a bus width available
+		 * - until master bus is aligned
 		 */
-		while (bd.remainder > (mbus->buswidth - 1)) {
-			size_t lli_len, target_len, tsize, odd_bytes;
+		if (bd.remainder < mbus->buswidth)
+			early_bytes = bd.remainder;
+		else if ((mbus->addr) % (mbus->buswidth)) {
+			early_bytes = mbus->buswidth - (mbus->addr) %
+				(mbus->buswidth);
+			if ((bd.remainder - early_bytes) < mbus->buswidth)
+				early_bytes = bd.remainder;
+		}

+		if (early_bytes) {
+			dev_vdbg(&pl08x->adev->dev,
+				"%s byte width LLIs (remain 0x%08x)\n",
+				__func__, bd.remainder);
+			prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
+				&total_bytes);
+		}
+
+		if (bd.remainder) {
 			/*
-			 * If enough left try to send max possible,
-			 * otherwise try to send the remainder
+			 * Master now aligned
+			 * - if slave is not then we must set its width down
 			 */
-			target_len = min(bd.remainder, max_bytes_per_lli);
+			if (sbus->addr % sbus->buswidth) {
+				dev_dbg(&pl08x->adev->dev,
+					"%s set down bus width to one byte\n",
+					__func__);
+
+				sbus->buswidth = 1;
+			}

 			/*
-			 * Set bus lengths for incrementing buses to the
-			 * number of bytes which fill to next memory boundary,
-			 * limiting on the target length calculated above.
+			 * Bytes transferred = tsize * src width, not
+			 * MIN(buswidths)
 			 */
-			if (cctl & PL080_CONTROL_SRC_INCR)
-				bd.srcbus.fill_bytes =
-					pl08x_pre_boundary(bd.srcbus.addr,
-						target_len);
-			else
-				bd.srcbus.fill_bytes = target_len;
-
-			if (cctl & PL080_CONTROL_DST_INCR)
-				bd.dstbus.fill_bytes =
-					pl08x_pre_boundary(bd.dstbus.addr,
-						target_len);
-			else
-				bd.dstbus.fill_bytes = target_len;
-
-			/* Find the nearest */
-			lli_len = min(bd.srcbus.fill_bytes,
-				bd.dstbus.fill_bytes);
-
-			BUG_ON(lli_len > bd.remainder);
-
-			if (lli_len <= 0) {
-				dev_err(&pl08x->adev->dev,
-					"%s lli_len is %zu, <= 0\n",
-					__func__, lli_len);
-				return 0;
-			}
+			max_bytes_per_lli = bd.srcbus.buswidth *
+				PL080_CONTROL_TRANSFER_SIZE_MASK;
+			dev_vdbg(&pl08x->adev->dev,
+				"%s max bytes per lli = %zu\n",
+				__func__, max_bytes_per_lli);
+
+			/*
+			 * Make largest possible LLIs until less than one bus
+			 * width left
+			 */
+			while (bd.remainder > (mbus->buswidth - 1)) {
+				size_t lli_len, tsize, width;

-			if (lli_len == target_len) {
-				/*
-				 * Can send what we wanted.
-				 * Maintain alignment
-				 */
-				lli_len = (lli_len/mbus->buswidth) *
-					mbus->buswidth;
-				odd_bytes = 0;
-			} else {
 				/*
-				 * So now we know how many bytes to transfer
-				 * to get to the nearest boundary. The next
-				 * LLI will past the boundary. However, we
-				 * may be working to a boundary on the slave
-				 * bus. We need to ensure the master stays
-				 * aligned, and that we are working in
-				 * multiples of the bus widths.
+				 * If enough left try to send max possible,
+				 * otherwise try to send the remainder
 				 */
-				odd_bytes = lli_len % mbus->buswidth;
-				lli_len -= odd_bytes;
-
-			}
+				lli_len = min(bd.remainder, max_bytes_per_lli);

-			if (lli_len) {
 				/*
-				 * Check against minimum bus alignment:
-				 * Calculate actual transfer size in relation
-				 * to bus width an get a maximum remainder of
-				 * the smallest bus width - 1
+				 * Check against maximum bus alignment:
+				 * Calculate actual transfer size in relation to
+				 * bus width and get a maximum remainder of the
+				 * highest bus width - 1
 				 */
-				/* FIXME: use round_down()? */
-				tsize = lli_len / min(mbus->buswidth,
-					sbus->buswidth);
-				lli_len = tsize * min(mbus->buswidth,
-					sbus->buswidth);
-
-				if (target_len != lli_len) {
-					dev_vdbg(&pl08x->adev->dev,
-					"%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
-					__func__, target_len, lli_len, txd->len);
-				}
-
-				cctl = pl08x_cctl_bits(cctl,
-					bd.srcbus.buswidth,
-					bd.dstbus.buswidth,
-					tsize);
+				width = max(mbus->buswidth, sbus->buswidth);
+				lli_len = (lli_len / width) * width;
+				tsize = lli_len / bd.srcbus.buswidth;

 				dev_vdbg(&pl08x->adev->dev,
-					"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
+					"%s fill lli with single lli chunk of "
+					"size 0x%08zx (remainder 0x%08zx)\n",
 					__func__, lli_len, bd.remainder);
+
+				cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
+					bd.dstbus.buswidth, tsize);
 				pl08x_fill_lli_for_desc(&bd, num_llis++,
 					lli_len, cctl);
 				total_bytes += lli_len;
 			}

-
-			if (odd_bytes) {
-				/*
-				 * Creep past the boundary, maintaining
-				 * master alignment
-				 */
-				int j;
-				for (j = 0; (j < mbus->buswidth)
-						&& (bd.remainder); j++) {
-					cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-					dev_vdbg(&pl08x->adev->dev,
-					"%s align with boundary, single byte (remain 0x%08zx)\n",
-					__func__, bd.remainder);
-					pl08x_fill_lli_for_desc(&bd,
-						num_llis++, 1, cctl);
-					total_bytes++;
-				}
+			/*
+			 * Send any odd bytes
+			 */
+			if (bd.remainder) {
+				dev_vdbg(&pl08x->adev->dev,
+					"%s align with boundary, send odd bytes (remain %zu)\n",
+					__func__, bd.remainder);
+				prep_byte_width_lli(&bd, &cctl, bd.remainder,
+					num_llis++, &total_bytes);
 			}
 		}

-		/*
-		 * Send any odd bytes
-		 */
-		while (bd.remainder) {
-			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			dev_vdbg(&pl08x->adev->dev,
-				"%s align with boundary, single odd byte (remain %zu)\n",
-				__func__, bd.remainder);
-			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
-			total_bytes++;
+		if (total_bytes != dsg->len) {
+			dev_err(&pl08x->adev->dev,
+				"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
+				__func__, total_bytes, dsg->len);
+			return 0;
 		}
-	}
-	if (total_bytes != txd->len) {
-		dev_err(&pl08x->adev->dev,
-			"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
-			__func__, total_bytes, txd->len);
-		return 0;
-	}

-	if (num_llis >= MAX_NUM_TSFR_LLIS) {
-		dev_err(&pl08x->adev->dev,
-			"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
-			__func__, (u32) MAX_NUM_TSFR_LLIS);
-		return 0;
+		if (num_llis >= MAX_NUM_TSFR_LLIS) {
+			dev_err(&pl08x->adev->dev,
+				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
+				__func__, (u32) MAX_NUM_TSFR_LLIS);
+			return 0;
+		}
 	}

 	llis_va = txd->llis_va;
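The rewritten loop above processes one pl08x_sg at a time and splits each into three phases: single-byte LLIs until the master bus address is aligned (early_bytes), maximal chunks whose length is capped by the transfer-size field (counted in source-width transfers) and rounded down to a multiple of the wider bus width, and finally a byte-wide tail. A self-contained sketch of that arithmetic for the simplified case where both buses increment and the master is the wider bus (TSIZE_MAX and plan_llis() are illustrative names, not the driver's):

    #include <stdio.h>

    #define TSIZE_MAX 0xfff  /* stand-in for PL080_CONTROL_TRANSFER_SIZE_MASK */

    static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }
    static unsigned max_u(unsigned a, unsigned b) { return a > b ? a : b; }

    /* plan the LLIs for one scatter entry; simplified: both buses increment,
     * so the master is simply the wider bus and no width gets knocked down */
    static void plan_llis(unsigned maddr, unsigned len,
                          unsigned src_width, unsigned dst_width)
    {
        unsigned mwidth = max_u(src_width, dst_width); /* master bus width */
        unsigned remainder = len, early = 0;

        /* phase 1: byte-by-byte until the master address is aligned */
        if (remainder < mwidth)
            early = remainder;
        else if (maddr % mwidth) {
            early = mwidth - maddr % mwidth;
            if (remainder - early < mwidth)
                early = remainder;
        }
        if (early) {
            printf("%u single-byte LLIs\n", early);
            maddr += early;
            remainder -= early;
        }

        /* phase 2: largest chunks; tsize counts source-width transfers */
        while (remainder > mwidth - 1) {
            unsigned lli_len = min_u(remainder, src_width * TSIZE_MAX);

            lli_len = (lli_len / mwidth) * mwidth; /* keep both buses aligned */
            printf("chunk of %u bytes (tsize %u)\n",
                   lli_len, lli_len / src_width);
            remainder -= lli_len;
        }

        /* phase 3: the odd tail, again byte-wide */
        if (remainder)
            printf("odd tail of %u bytes\n", remainder);
    }

    int main(void)
    {
        plan_llis(0x1003, 4100, 4, 4); /* misaligned start, word-wide buses */
        return 0;
    }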
@@ -856,11 +800,19 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 	struct pl08x_txd *txd)
 {
+	struct pl08x_sg *dsg, *_dsg;
+
 	/* Free the LLI */
-	dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
+	if (txd->llis_va)
+		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

 	pl08x->pool_ctr--;

+	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
+		list_del(&dsg->node);
+		kfree(dsg);
+	}
+
 	kfree(txd);
 }

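pl08x_free_txd() tears down the dsg list with list_for_each_entry_safe(), which keeps a second cursor so each node's successor is read before the node itself is freed. The same pattern in plain C on a minimal singly-linked list (sg_node and free_all() are stand-ins, not the kernel's list API):

    #include <stdlib.h>

    /* a tiny singly-linked stand-in for the kernel's list_head usage */
    struct sg_node { size_t len; struct sg_node *next; };

    /* mirrors list_for_each_entry_safe(): save the successor before freeing,
     * since dsg->next is unreadable once free()/kfree() has run */
    static void free_all(struct sg_node *head)
    {
        struct sg_node *dsg = head, *_dsg;
        while (dsg) {
            _dsg = dsg->next; /* saved successor, like the _dsg cursor */
            free(dsg);
            dsg = _dsg;
        }
    }

    int main(void)
    {
        struct sg_node *head = NULL;
        for (int i = 0; i < 3; i++) {
            struct sg_node *n = malloc(sizeof(*n));
            n->len = 4096;
            n->next = head;
            head = n;
        }
        free_all(head);
        return 0;
    }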
@@ -917,9 +869,7 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 	 * need, but for slaves the physical signals may be muxed!
 	 * Can the platform allow us to use this channel?
 	 */
-	if (plchan->slave &&
-	    ch->signal < 0 &&
-	    pl08x->pd->get_signal) {
+	if (plchan->slave && pl08x->pd->get_signal) {
 		ret = pl08x->pd->get_signal(plchan);
 		if (ret < 0) {
 			dev_dbg(&pl08x->adev->dev,
@@ -1008,10 +958,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
-static enum dma_status
-pl08x_dma_tx_status(struct dma_chan *chan,
-		dma_cookie_t cookie,
-		struct dma_tx_state *txstate)
+static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
+		dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	dma_cookie_t last_used;
@@ -1253,7 +1201,9 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,

 	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
 	if (!num_llis) {
-		kfree(txd);
+		spin_lock_irqsave(&plchan->lock, flags);
+		pl08x_free_txd(pl08x, txd);
+		spin_unlock_irqrestore(&plchan->lock, flags);
 		return -EINVAL;
 	}

@@ -1301,13 +1251,14 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
 	unsigned long flags)
 {
-	struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

 	if (txd) {
 		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
 		txd->tx.flags = flags;
 		txd->tx.tx_submit = pl08x_tx_submit;
 		INIT_LIST_HEAD(&txd->node);
+		INIT_LIST_HEAD(&txd->dsg_list);

 		/* Always enable error and terminal interrupts */
 		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
@@ -1326,6 +1277,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_txd *txd;
+	struct pl08x_sg *dsg;
 	int ret;

 	txd = pl08x_get_txd(plchan, flags);
@@ -1335,10 +1287,19 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 		return NULL;
 	}

+	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+	if (!dsg) {
+		pl08x_free_txd(pl08x, txd);
+		dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
+				__func__);
+		return NULL;
+	}
+	list_add_tail(&dsg->node, &txd->dsg_list);
+
 	txd->direction = DMA_NONE;
-	txd->src_addr = src;
-	txd->dst_addr = dest;
-	txd->len = len;
+	dsg->src_addr = src;
+	dsg->dst_addr = dest;
+	dsg->len = len;

 	/* Set platform data for m2m */
 	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
@@ -1367,19 +1328,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_txd *txd;
-	int ret;
-
-	/*
-	 * Current implementation ASSUMES only one sg
-	 */
-	if (sg_len != 1) {
-		dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
-			__func__);
-		BUG();
-	}
+	struct pl08x_sg *dsg;
+	struct scatterlist *sg;
+	dma_addr_t slave_addr;
+	int ret, tmp;

 	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
 		__func__, sgl->length, plchan->name);

 	txd = pl08x_get_txd(plchan, flags);
 	if (!txd) {
@@ -1398,24 +1353,49 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	 * channel target address dynamically at runtime.
 	 */
 	txd->direction = direction;
-	txd->len = sgl->length;

 	if (direction == DMA_TO_DEVICE) {
-		txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 		txd->cctl = plchan->dst_cctl;
-		txd->src_addr = sgl->dma_address;
-		txd->dst_addr = plchan->dst_addr;
+		slave_addr = plchan->dst_addr;
 	} else if (direction == DMA_FROM_DEVICE) {
-		txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 		txd->cctl = plchan->src_cctl;
-		txd->src_addr = plchan->src_addr;
-		txd->dst_addr = sgl->dma_address;
+		slave_addr = plchan->src_addr;
 	} else {
+		pl08x_free_txd(pl08x, txd);
 		dev_err(&pl08x->adev->dev,
 			"%s direction unsupported\n", __func__);
 		return NULL;
 	}

+	if (plchan->cd->device_fc)
+		tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
+			PL080_FLOW_PER2MEM_PER;
+	else
+		tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
+			PL080_FLOW_PER2MEM;
+
+	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+
+	for_each_sg(sgl, sg, sg_len, tmp) {
+		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+		if (!dsg) {
+			pl08x_free_txd(pl08x, txd);
+			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
+					__func__);
+			return NULL;
+		}
+		list_add_tail(&dsg->node, &txd->dsg_list);
+
+		dsg->len = sg_dma_len(sg);
+		if (direction == DMA_TO_DEVICE) {
+			dsg->src_addr = sg_phys(sg);
+			dsg->dst_addr = slave_addr;
+		} else {
+			dsg->src_addr = slave_addr;
+			dsg->dst_addr = sg_phys(sg);
+		}
+	}
+
 	ret = pl08x_prep_channel_resources(plchan, txd);
 	if (ret)
 		return NULL;
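With the sg_len != 1 BUG() gone, prep_slave_sg() above turns every scatterlist entry into its own pl08x_sg, keeping the peripheral's FIFO address (slave_addr) fixed on one side and taking the memory address and length from the entry on the other. A hedged sketch of that mapping with flattened stand-in types (sg_ent, dsg and build_dsgs() are illustrative, not the kernel's):

    #include <stdio.h>

    /* stand-ins: a flattened scatterlist entry and the driver's per-chunk record */
    struct sg_ent { unsigned addr, len; };
    struct dsg    { unsigned src, dst, len; };

    enum dir { TO_DEVICE, FROM_DEVICE };

    /* mirrors the new for_each_sg() loop: the memory side comes from the sg
     * entry, the peripheral (slave_addr) stays fixed on the other side */
    static void build_dsgs(const struct sg_ent *sgl, int sg_len,
                           unsigned slave_addr, enum dir d, struct dsg *out)
    {
        for (int i = 0; i < sg_len; i++) {
            out[i].len = sgl[i].len;
            if (d == TO_DEVICE) {
                out[i].src = sgl[i].addr;
                out[i].dst = slave_addr;
            } else {
                out[i].src = slave_addr;
                out[i].dst = sgl[i].addr;
            }
        }
    }

    int main(void)
    {
        struct sg_ent sgl[2] = { { 0x80000000, 512 }, { 0x80100000, 512 } };
        struct dsg out[2];

        build_dsgs(sgl, 2, 0x10010000, TO_DEVICE, out);
        for (int i = 0; i < 2; i++)
            printf("dsg %d: src=0x%x dst=0x%x len=%u\n",
                   i, out[i].src, out[i].dst, out[i].len);
        return 0;
    }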
@@ -1489,9 +1469,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,

 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
 {
-	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	struct pl08x_dma_chan *plchan;
 	char *name = chan_id;

+	/* Reject channels for devices not bound to this driver */
+	if (chan->device->dev->driver != &pl08x_amba_driver.drv)
+		return false;
+
+	plchan = to_pl08x_chan(chan);
+
 	/* Check that the channel is not taken! */
 	if (!strcmp(plchan->name, name))
 		return true;
@@ -1507,34 +1493,34 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
 */
 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
 {
-	u32 val;
-
-	val = readl(pl08x->base + PL080_CONFIG);
-	val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
-	/* We implicitly clear bit 1 and that means little-endian mode */
-	val |= PL080_CONFIG_ENABLE;
-	writel(val, pl08x->base + PL080_CONFIG);
+	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
 }

 static void pl08x_unmap_buffers(struct pl08x_txd *txd)
 {
 	struct device *dev = txd->tx.chan->device->dev;
+	struct pl08x_sg *dsg;

 	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
 		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-			dma_unmap_single(dev, txd->src_addr, txd->len,
-				DMA_TO_DEVICE);
-		else
-			dma_unmap_page(dev, txd->src_addr, txd->len,
-				DMA_TO_DEVICE);
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_single(dev, dsg->src_addr, dsg->len,
+						DMA_TO_DEVICE);
+		else {
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_page(dev, dsg->src_addr, dsg->len,
+						DMA_TO_DEVICE);
+		}
 	}
 	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
 		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-			dma_unmap_single(dev, txd->dst_addr, txd->len,
-				DMA_FROM_DEVICE);
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
+						DMA_FROM_DEVICE);
 		else
-			dma_unmap_page(dev, txd->dst_addr, txd->len,
-				DMA_FROM_DEVICE);
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
+						DMA_FROM_DEVICE);
 	}
 }

@@ -1589,8 +1575,8 @@ static void pl08x_tasklet(unsigned long data)
 		 */
 		list_for_each_entry(waiting, &pl08x->memcpy.channels,
 				chan.device_node) {
 			if (waiting->state == PL08X_CHAN_WAITING &&
-				waiting->waiting != NULL) {
+			    waiting->waiting != NULL) {
 				int ret;

 				/* This should REALLY not fail now */
@@ -1630,38 +1616,40 @@ static void pl08x_tasklet(unsigned long data)
 static irqreturn_t pl08x_irq(int irq, void *dev)
 {
 	struct pl08x_driver_data *pl08x = dev;
-	u32 mask = 0;
-	u32 val;
-	int i;
-
-	val = readl(pl08x->base + PL080_ERR_STATUS);
-	if (val) {
-		/* An error interrupt (on one or more channels) */
-		dev_err(&pl08x->adev->dev,
-			"%s error interrupt, register value 0x%08x\n",
-			__func__, val);
-		/*
-		 * Simply clear ALL PL08X error interrupts,
-		 * regardless of channel and cause
-		 * FIXME: should be 0x00000003 on PL081 really.
-		 */
-		writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
+	u32 mask = 0, err, tc, i;
+
+	/* check & clear - ERR & TC interrupts */
+	err = readl(pl08x->base + PL080_ERR_STATUS);
+	if (err) {
+		dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
+			__func__, err);
+		writel(err, pl08x->base + PL080_ERR_CLEAR);
 	}
-	val = readl(pl08x->base + PL080_INT_STATUS);
+	tc = readl(pl08x->base + PL080_INT_STATUS);
+	if (tc)
+		writel(tc, pl08x->base + PL080_TC_CLEAR);
+
+	if (!err && !tc)
+		return IRQ_NONE;
+
 	for (i = 0; i < pl08x->vd->channels; i++) {
-		if ((1 << i) & val) {
+		if (((1 << i) & err) || ((1 << i) & tc)) {
 			/* Locate physical channel */
 			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
 			struct pl08x_dma_chan *plchan = phychan->serving;

+			if (!plchan) {
+				dev_err(&pl08x->adev->dev,
+					"%s Error TC interrupt on unused channel: 0x%08x\n",
+					__func__, i);
+				continue;
+			}
+
 			/* Schedule tasklet on this channel */
 			tasklet_schedule(&plchan->tasklet);
-
 			mask |= (1 << i);
 		}
 	}
-	/* Clear only the terminal interrupts on channels we processed */
-	writel(mask, pl08x->base + PL080_TC_CLEAR);

 	return mask ? IRQ_HANDLED : IRQ_NONE;
 }
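The reworked handler reads the ERR and TC status registers once, acknowledges whatever was set immediately, bails out with IRQ_NONE when neither fired (so a shared line is handed back quickly), and then dispatches per channel on the union of the two bit masks. A compilable standalone model of that flow (struct regs and handle_irq() are stand-ins; register accesses are modeled as plain loads and stores):

    #include <stdint.h>
    #include <stdio.h>

    #define NCHAN 8

    /* register-file stand-in for PL080_ERR_STATUS / PL080_INT_STATUS */
    struct regs { uint32_t err_status, int_status; };

    /* mirrors the rewritten pl08x_irq(): clear what was read up front, then
     * dispatch per channel on the union of error and terminal-count bits */
    static int handle_irq(struct regs *r)
    {
        uint32_t mask = 0;
        uint32_t err = r->err_status;
        if (err)
            r->err_status = 0;   /* models the write to PL080_ERR_CLEAR */
        uint32_t tc = r->int_status;
        if (tc)
            r->int_status = 0;   /* models the write to PL080_TC_CLEAR */

        if (!err && !tc)
            return 0;            /* IRQ_NONE: not our interrupt */

        for (unsigned i = 0; i < NCHAN; i++)
            if ((err | tc) & (1u << i)) {
                printf("service channel %u\n", i);
                mask |= 1u << i;
            }
        return mask ? 1 : 0;     /* IRQ_HANDLED when any channel matched */
    }

    int main(void)
    {
        struct regs r = { .err_status = 0x0, .int_status = 0x5 };
        printf("handled=%d\n", handle_irq(&r));
        return 0;
    }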
@@ -1685,9 +1673,7 @@ static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
 * Make a local wrapper to hold required data
 */
 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
-		struct dma_device *dmadev,
-		unsigned int channels,
-		bool slave)
+		struct dma_device *dmadev, unsigned int channels, bool slave)
 {
 	struct pl08x_dma_chan *chan;
 	int i;
@@ -1700,7 +1686,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 	 * to cope with that situation.
 	 */
 	for (i = 0; i < channels; i++) {
-		chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
+		chan = kzalloc(sizeof(*chan), GFP_KERNEL);
 		if (!chan) {
 			dev_err(&pl08x->adev->dev,
 				"%s no memory for channel\n", __func__);
@@ -1728,7 +1714,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 			kfree(chan);
 			continue;
 		}
-		dev_info(&pl08x->adev->dev,
+		dev_dbg(&pl08x->adev->dev,
 			 "initialize virtual channel \"%s\"\n",
 			 chan->name);

@@ -1837,9 +1823,9 @@ static const struct file_operations pl08x_debugfs_operations = {
 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
 {
 	/* Expose a simple debugfs interface to view all clocks */
-	(void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
-			NULL, pl08x,
+	(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
+			S_IFREG | S_IRUGO, NULL, pl08x,
 			&pl08x_debugfs_operations);
 }

 #else
@@ -1860,12 +1846,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 		return ret;

 	/* Create the driver state holder */
-	pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
+	pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
 	if (!pl08x) {
 		ret = -ENOMEM;
 		goto out_no_pl08x;
 	}

+	pm_runtime_set_active(&adev->dev);
+	pm_runtime_enable(&adev->dev);
+
 	/* Initialize memcpy engine */
 	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
 	pl08x->memcpy.dev = &adev->dev;
@@ -1939,7 +1928,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	}

 	/* Initialize physical channels */
-	pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
+	pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
 			GFP_KERNEL);
 	if (!pl08x->phy_chans) {
 		dev_err(&adev->dev, "%s failed to allocate "
@@ -1956,9 +1945,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 		spin_lock_init(&ch->lock);
 		ch->serving = NULL;
 		ch->signal = -1;
-		dev_info(&adev->dev,
-			 "physical channel %d is %s\n", i,
-			 pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
+		dev_dbg(&adev->dev, "physical channel %d is %s\n",
+			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
 	}

1964 /* Register as many memcpy channels as there are physical channels */ 1952 /* Register as many memcpy channels as there are physical channels */
@@ -1974,8 +1962,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1974 1962
1975 /* Register slave channels */ 1963 /* Register slave channels */
1976 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, 1964 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
1977 pl08x->pd->num_slave_channels, 1965 pl08x->pd->num_slave_channels, true);
1978 true);
1979 if (ret <= 0) { 1966 if (ret <= 0) {
1980 dev_warn(&pl08x->adev->dev, 1967 dev_warn(&pl08x->adev->dev,
1981 "%s failed to enumerate slave channels - %d\n", 1968 "%s failed to enumerate slave channels - %d\n",
@@ -2005,6 +1992,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
 		 amba_part(adev), amba_rev(adev),
 		 (unsigned long long)adev->res.start, adev->irq[0]);
+
+	pm_runtime_put(&adev->dev);
 	return 0;

 out_no_slave_reg:
@@ -2023,6 +2012,9 @@ out_no_ioremap:
 	dma_pool_destroy(pl08x->pool);
 out_no_lli_pool:
 out_no_platdata:
+	pm_runtime_put(&adev->dev);
+	pm_runtime_disable(&adev->dev);
+
 	kfree(pl08x);
 out_no_pl08x:
 	amba_release_regions(adev);