author     Vinod Koul <vinod.koul@linux.intel.com>   2011-09-02 07:13:44 -0400
committer  Vinod Koul <vinod.koul@intel.com>         2011-09-02 07:13:44 -0400
commit     8516f52fa497b37eb3d5e58d34e61a41ae0a553a (patch)
tree       b73175b134d36bea303d2f181ae89a448c2add81 /drivers/dma/amba-pl08x.c
parent     c6a389f123b9f68d605bb7e0f9b32ec1e3e14132 (diff)
parent     7b4b88e067d37cbbafd856121767f7e154294eb2 (diff)
Merge branch 'next' into v3.1-rc4
Fixed trivial conflicts in drivers/dma/amba-pl08x.c
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/amba-pl08x.c')
-rw-r--r--  drivers/dma/amba-pl08x.c  445
1 file changed, 188 insertions, 257 deletions
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index be21e3f138a8..3c2cad5b1165 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -66,28 +66,23 @@ | |||
66 | * after the final transfer signalled by LBREQ or LSREQ. The DMAC | 66 | * after the final transfer signalled by LBREQ or LSREQ. The DMAC |
67 | * will then move to the next LLI entry. | 67 | * will then move to the next LLI entry. |
68 | * | 68 | * |
69 | * Only the former works sanely with scatter lists, so we only implement | ||
70 | * the DMAC flow control method. However, peripherals which use the LBREQ | ||
71 | * and LSREQ signals (eg, MMCI) are unable to use this mode, which through | ||
72 | * these hardware restrictions prevents them from using scatter DMA. | ||
73 | * | ||
74 | * Global TODO: | 69 | * Global TODO: |
75 | * - Break out common code from arch/arm/mach-s3c64xx and share | 70 | * - Break out common code from arch/arm/mach-s3c64xx and share |
76 | */ | 71 | */ |
77 | #include <linux/device.h> | ||
78 | #include <linux/init.h> | ||
79 | #include <linux/module.h> | ||
80 | #include <linux/interrupt.h> | ||
81 | #include <linux/slab.h> | ||
82 | #include <linux/delay.h> | ||
83 | #include <linux/dma-mapping.h> | ||
84 | #include <linux/dmapool.h> | ||
85 | #include <linux/dmaengine.h> | ||
86 | #include <linux/amba/bus.h> | 72 | #include <linux/amba/bus.h> |
87 | #include <linux/amba/pl08x.h> | 73 | #include <linux/amba/pl08x.h> |
88 | #include <linux/debugfs.h> | 74 | #include <linux/debugfs.h> |
75 | #include <linux/delay.h> | ||
76 | #include <linux/device.h> | ||
77 | #include <linux/dmaengine.h> | ||
78 | #include <linux/dmapool.h> | ||
79 | #include <linux/dma-mapping.h> | ||
80 | #include <linux/init.h> | ||
81 | #include <linux/interrupt.h> | ||
82 | #include <linux/module.h> | ||
83 | #include <linux/pm_runtime.h> | ||
89 | #include <linux/seq_file.h> | 84 | #include <linux/seq_file.h> |
90 | 85 | #include <linux/slab.h> | |
91 | #include <asm/hardware/pl080.h> | 86 | #include <asm/hardware/pl080.h> |
92 | 87 | ||
93 | #define DRIVER_NAME "pl08xdmac" | 88 | #define DRIVER_NAME "pl08xdmac" |
@@ -126,7 +121,8 @@ struct pl08x_lli { | |||
126 | * @phy_chans: array of data for the physical channels | 121 | * @phy_chans: array of data for the physical channels |
127 | * @pool: a pool for the LLI descriptors | 122 | * @pool: a pool for the LLI descriptors |
128 | * @pool_ctr: counter of LLIs in the pool | 123 | * @pool_ctr: counter of LLIs in the pool |
129 | * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches | 124 | * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI |
125 | * fetches | ||
130 | * @mem_buses: set to indicate memory transfers on AHB2. | 126 | * @mem_buses: set to indicate memory transfers on AHB2. |
131 | * @lock: a spinlock for this struct | 127 | * @lock: a spinlock for this struct |
132 | */ | 128 | */ |
@@ -149,14 +145,6 @@ struct pl08x_driver_data { | |||
149 | * PL08X specific defines | 145 | * PL08X specific defines |
150 | */ | 146 | */ |
151 | 147 | ||
152 | /* | ||
153 | * Memory boundaries: the manual for PL08x says that the controller | ||
154 | * cannot read past a 1KiB boundary, so these defines are used to | ||
155 | * create transfer LLIs that do not cross such boundaries. | ||
156 | */ | ||
157 | #define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */ | ||
158 | #define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT) | ||
159 | |||
160 | /* Size (bytes) of each LLI buffer allocated for one transfer */ | 148 | /* Size (bytes) of each LLI buffer allocated for one transfer */ |
161 | # define PL08X_LLI_TSFR_SIZE 0x2000 | 149 | # define PL08X_LLI_TSFR_SIZE 0x2000 |
162 | 150 | ||
@@ -272,7 +260,6 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) | |||
272 | writel(val, ch->base + PL080_CH_CONFIG); | 260 | writel(val, ch->base + PL080_CH_CONFIG); |
273 | } | 261 | } |
274 | 262 | ||
275 | |||
276 | /* | 263 | /* |
277 | * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and | 264 | * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and |
278 | * clears any pending interrupt status. This should not be used for | 265 | * clears any pending interrupt status. This should not be used for |
@@ -407,6 +394,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, | |||
407 | return NULL; | 394 | return NULL; |
408 | } | 395 | } |
409 | 396 | ||
397 | pm_runtime_get_sync(&pl08x->adev->dev); | ||
410 | return ch; | 398 | return ch; |
411 | } | 399 | } |
412 | 400 | ||
@@ -420,6 +408,8 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, | |||
420 | /* Stop the channel and clear its interrupts */ | 408 | /* Stop the channel and clear its interrupts */ |
421 | pl08x_terminate_phy_chan(pl08x, ch); | 409 | pl08x_terminate_phy_chan(pl08x, ch); |
422 | 410 | ||
411 | pm_runtime_put(&pl08x->adev->dev); | ||
412 | |||
423 | /* Mark it as free */ | 413 | /* Mark it as free */ |
424 | ch->serving = NULL; | 414 | ch->serving = NULL; |
425 | spin_unlock_irqrestore(&ch->lock, flags); | 415 | spin_unlock_irqrestore(&ch->lock, flags); |
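For context, a minimal sketch (not the driver's literal code) of how the runtime-PM calls added in these two hunks pair up over a physical channel's lifetime, assuming the driver's existing types; pl08x_find_free_channel() is a hypothetical stand-in for the existing allocation loop.

struct pl08x_phy_chan *get_phy_channel(struct pl08x_driver_data *pl08x)
{
        /* hypothetical stand-in for the real search over pl08x->phy_chans */
        struct pl08x_phy_chan *ch = pl08x_find_free_channel(pl08x);

        if (ch)
                pm_runtime_get_sync(&pl08x->adev->dev); /* keep the DMAC powered */
        return ch;
}

void put_phy_channel(struct pl08x_driver_data *pl08x, struct pl08x_phy_chan *ch)
{
        pl08x_terminate_phy_chan(pl08x, ch);    /* stop channel, clear its IRQs */
        pm_runtime_put(&pl08x->adev->dev);      /* balances the get_sync above */
        ch->serving = NULL;                     /* mark it free again */
}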
@@ -499,36 +489,30 @@ struct pl08x_lli_build_data { | |||
499 | }; | 489 | }; |
500 | 490 | ||
501 | /* | 491 | /* |
502 | * Autoselect a master bus to use for the transfer this prefers the | 492 | * Autoselect a master bus to use for the transfer. Slave will be the chosen as |
503 | * destination bus if both available if fixed address on one bus the | 493 | * victim in case src & dest are not similarly aligned. i.e. If after aligning |
504 | * other will be chosen | 494 | * masters address with width requirements of transfer (by sending few byte by |
495 | * byte data), slave is still not aligned, then its width will be reduced to | ||
496 | * BYTE. | ||
497 | * - prefers the destination bus if both available | ||
498 | * - prefers bus with fixed address (i.e. peripheral) | ||
505 | */ | 499 | */ |
506 | static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, | 500 | static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, |
507 | struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) | 501 | struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) |
508 | { | 502 | { |
509 | if (!(cctl & PL080_CONTROL_DST_INCR)) { | 503 | if (!(cctl & PL080_CONTROL_DST_INCR)) { |
510 | *mbus = &bd->srcbus; | ||
511 | *sbus = &bd->dstbus; | ||
512 | } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { | ||
513 | *mbus = &bd->dstbus; | 504 | *mbus = &bd->dstbus; |
514 | *sbus = &bd->srcbus; | 505 | *sbus = &bd->srcbus; |
506 | } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { | ||
507 | *mbus = &bd->srcbus; | ||
508 | *sbus = &bd->dstbus; | ||
515 | } else { | 509 | } else { |
516 | if (bd->dstbus.buswidth == 4) { | 510 | if (bd->dstbus.buswidth >= bd->srcbus.buswidth) { |
517 | *mbus = &bd->dstbus; | 511 | *mbus = &bd->dstbus; |
518 | *sbus = &bd->srcbus; | 512 | *sbus = &bd->srcbus; |
519 | } else if (bd->srcbus.buswidth == 4) { | 513 | } else { |
520 | *mbus = &bd->srcbus; | ||
521 | *sbus = &bd->dstbus; | ||
522 | } else if (bd->dstbus.buswidth == 2) { | ||
523 | *mbus = &bd->dstbus; | ||
524 | *sbus = &bd->srcbus; | ||
525 | } else if (bd->srcbus.buswidth == 2) { | ||
526 | *mbus = &bd->srcbus; | 514 | *mbus = &bd->srcbus; |
527 | *sbus = &bd->dstbus; | 515 | *sbus = &bd->dstbus; |
528 | } else { | ||
529 | /* bd->srcbus.buswidth == 1 */ | ||
530 | *mbus = &bd->dstbus; | ||
531 | *sbus = &bd->srcbus; | ||
532 | } | 516 | } |
533 | } | 517 | } |
534 | } | 518 | } |
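As a reading aid, a condensed sketch of the selection logic the rewritten pl08x_choose_master_bus() now implements (same behaviour as the hunk above, flattened into one if/else chain): a fixed-address bus, i.e. the peripheral side, always becomes the master, and for memory-to-memory transfers the wider bus wins.

        if (!(cctl & PL080_CONTROL_DST_INCR)) {
                *mbus = &bd->dstbus;            /* fixed destination: peripheral is master */
                *sbus = &bd->srcbus;
        } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
                *mbus = &bd->srcbus;            /* fixed source: peripheral is master */
                *sbus = &bd->dstbus;
        } else if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
                *mbus = &bd->dstbus;            /* both incrementing: prefer the wider bus */
                *sbus = &bd->srcbus;
        } else {
                *mbus = &bd->srcbus;
                *sbus = &bd->dstbus;
        }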
@@ -547,7 +531,8 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd, | |||
547 | llis_va[num_llis].cctl = cctl; | 531 | llis_va[num_llis].cctl = cctl; |
548 | llis_va[num_llis].src = bd->srcbus.addr; | 532 | llis_va[num_llis].src = bd->srcbus.addr; |
549 | llis_va[num_llis].dst = bd->dstbus.addr; | 533 | llis_va[num_llis].dst = bd->dstbus.addr; |
550 | llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); | 534 | llis_va[num_llis].lli = llis_bus + (num_llis + 1) * |
535 | sizeof(struct pl08x_lli); | ||
551 | llis_va[num_llis].lli |= bd->lli_bus; | 536 | llis_va[num_llis].lli |= bd->lli_bus; |
552 | 537 | ||
553 | if (cctl & PL080_CONTROL_SRC_INCR) | 538 | if (cctl & PL080_CONTROL_SRC_INCR) |
@@ -560,16 +545,12 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd, | |||
560 | bd->remainder -= len; | 545 | bd->remainder -= len; |
561 | } | 546 | } |
562 | 547 | ||
563 | /* | 548 | static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd, |
564 | * Return number of bytes to fill to boundary, or len. | 549 | u32 *cctl, u32 len, int num_llis, size_t *total_bytes) |
565 | * This calculation works for any value of addr. | ||
566 | */ | ||
567 | static inline size_t pl08x_pre_boundary(u32 addr, size_t len) | ||
568 | { | 550 | { |
569 | size_t boundary_len = PL08X_BOUNDARY_SIZE - | 551 | *cctl = pl08x_cctl_bits(*cctl, 1, 1, len); |
570 | (addr & (PL08X_BOUNDARY_SIZE - 1)); | 552 | pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl); |
571 | 553 | (*total_bytes) += len; | |
572 | return min(boundary_len, len); | ||
573 | } | 554 | } |
574 | 555 | ||
575 | /* | 556 | /* |
@@ -583,13 +564,11 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | |||
583 | struct pl08x_bus_data *mbus, *sbus; | 564 | struct pl08x_bus_data *mbus, *sbus; |
584 | struct pl08x_lli_build_data bd; | 565 | struct pl08x_lli_build_data bd; |
585 | int num_llis = 0; | 566 | int num_llis = 0; |
586 | u32 cctl; | 567 | u32 cctl, early_bytes = 0; |
587 | size_t max_bytes_per_lli; | 568 | size_t max_bytes_per_lli, total_bytes = 0; |
588 | size_t total_bytes = 0; | ||
589 | struct pl08x_lli *llis_va; | 569 | struct pl08x_lli *llis_va; |
590 | 570 | ||
591 | txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, | 571 | txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus); |
592 | &txd->llis_bus); | ||
593 | if (!txd->llis_va) { | 572 | if (!txd->llis_va) { |
594 | dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); | 573 | dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); |
595 | return 0; | 574 | return 0; |
@@ -619,55 +598,85 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | |||
619 | bd.srcbus.buswidth = bd.srcbus.maxwidth; | 598 | bd.srcbus.buswidth = bd.srcbus.maxwidth; |
620 | bd.dstbus.buswidth = bd.dstbus.maxwidth; | 599 | bd.dstbus.buswidth = bd.dstbus.maxwidth; |
621 | 600 | ||
622 | /* | ||
623 | * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) | ||
624 | */ | ||
625 | max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) * | ||
626 | PL080_CONTROL_TRANSFER_SIZE_MASK; | ||
627 | |||
628 | /* We need to count this down to zero */ | 601 | /* We need to count this down to zero */ |
629 | bd.remainder = txd->len; | 602 | bd.remainder = txd->len; |
630 | 603 | ||
631 | /* | ||
632 | * Choose bus to align to | ||
633 | * - prefers destination bus if both available | ||
634 | * - if fixed address on one bus chooses other | ||
635 | */ | ||
636 | pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); | 604 | pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); |
637 | 605 | ||
638 | dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n", | 606 | dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n", |
639 | bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "", | 607 | bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "", |
640 | bd.srcbus.buswidth, | 608 | bd.srcbus.buswidth, |
641 | bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "", | 609 | bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "", |
642 | bd.dstbus.buswidth, | 610 | bd.dstbus.buswidth, |
643 | bd.remainder, max_bytes_per_lli); | 611 | bd.remainder); |
644 | dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", | 612 | dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", |
645 | mbus == &bd.srcbus ? "src" : "dst", | 613 | mbus == &bd.srcbus ? "src" : "dst", |
646 | sbus == &bd.srcbus ? "src" : "dst"); | 614 | sbus == &bd.srcbus ? "src" : "dst"); |
647 | 615 | ||
648 | if (txd->len < mbus->buswidth) { | 616 | /* |
649 | /* Less than a bus width available - send as single bytes */ | 617 | * Zero length is only allowed if all these requirements are met: |
650 | while (bd.remainder) { | 618 | * - flow controller is peripheral. |
651 | dev_vdbg(&pl08x->adev->dev, | 619 | * - src.addr is aligned to src.width |
652 | "%s single byte LLIs for a transfer of " | 620 | * - dst.addr is aligned to dst.width |
653 | "less than a bus width (remain 0x%08x)\n", | 621 | * |
654 | __func__, bd.remainder); | 622 | * sg_len == 1 should be true, as there can be two cases here: |
655 | cctl = pl08x_cctl_bits(cctl, 1, 1, 1); | 623 | * - Memory addresses are contiguous and are not scattered. Here, Only |
656 | pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); | 624 | * one sg will be passed by user driver, with memory address and zero |
657 | total_bytes++; | 625 | * length. We pass this to controller and after the transfer it will |
626 | * receive the last burst request from peripheral and so transfer | ||
627 | * finishes. | ||
628 | * | ||
629 | * - Memory addresses are scattered and are not contiguous. Here, | ||
630 | * Obviously as DMA controller doesn't know when a lli's transfer gets | ||
631 | * over, it can't load next lli. So in this case, there has to be an | ||
632 | * assumption that only one lli is supported. Thus, we can't have | ||
633 | * scattered addresses. | ||
634 | */ | ||
635 | if (!bd.remainder) { | ||
636 | u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >> | ||
637 | PL080_CONFIG_FLOW_CONTROL_SHIFT; | ||
638 | if (!((fc >= PL080_FLOW_SRC2DST_DST) && | ||
639 | (fc <= PL080_FLOW_SRC2DST_SRC))) { | ||
640 | dev_err(&pl08x->adev->dev, "%s sg len can't be zero", | ||
641 | __func__); | ||
642 | return 0; | ||
658 | } | 643 | } |
659 | } else { | 644 | |
660 | /* Make one byte LLIs until master bus is aligned */ | 645 | if ((bd.srcbus.addr % bd.srcbus.buswidth) || |
661 | while ((mbus->addr) % (mbus->buswidth)) { | 646 | (bd.srcbus.addr % bd.srcbus.buswidth)) { |
662 | dev_vdbg(&pl08x->adev->dev, | 647 | dev_err(&pl08x->adev->dev, |
663 | "%s adjustment lli for less than bus width " | 648 | "%s src & dst address must be aligned to src" |
664 | "(remain 0x%08x)\n", | 649 | " & dst width if peripheral is flow controller", |
665 | __func__, bd.remainder); | 650 | __func__); |
666 | cctl = pl08x_cctl_bits(cctl, 1, 1, 1); | 651 | return 0; |
667 | pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); | ||
668 | total_bytes++; | ||
669 | } | 652 | } |
670 | 653 | ||
654 | cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, | ||
655 | bd.dstbus.buswidth, 0); | ||
656 | pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl); | ||
657 | } | ||
658 | |||
659 | /* | ||
660 | * Send byte by byte for following cases | ||
661 | * - Less than a bus width available | ||
662 | * - until master bus is aligned | ||
663 | */ | ||
664 | if (bd.remainder < mbus->buswidth) | ||
665 | early_bytes = bd.remainder; | ||
666 | else if ((mbus->addr) % (mbus->buswidth)) { | ||
667 | early_bytes = mbus->buswidth - (mbus->addr) % (mbus->buswidth); | ||
668 | if ((bd.remainder - early_bytes) < mbus->buswidth) | ||
669 | early_bytes = bd.remainder; | ||
670 | } | ||
671 | |||
672 | if (early_bytes) { | ||
673 | dev_vdbg(&pl08x->adev->dev, "%s byte width LLIs " | ||
674 | "(remain 0x%08x)\n", __func__, bd.remainder); | ||
675 | prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++, | ||
676 | &total_bytes); | ||
677 | } | ||
678 | |||
679 | if (bd.remainder) { | ||
671 | /* | 680 | /* |
672 | * Master now aligned | 681 | * Master now aligned |
673 | * - if slave is not then we must set its width down | 682 | * - if slave is not then we must set its width down |
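To summarise the alignment handling this hunk introduces, a sketch (same logic as the patch, condensed) of how the "early bytes" are computed and sent byte by byte before the main LLIs are built:

        u32 early_bytes = 0;

        if (bd.remainder < mbus->buswidth) {
                /* whole transfer is shorter than one bus width */
                early_bytes = bd.remainder;
        } else if (mbus->addr % mbus->buswidth) {
                /* single bytes until the master address is width-aligned */
                early_bytes = mbus->buswidth - (mbus->addr % mbus->buswidth);
                if (bd.remainder - early_bytes < mbus->buswidth)
                        early_bytes = bd.remainder;     /* avoid a sub-width tail */
        }

        if (early_bytes)
                prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++, &total_bytes);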
@@ -680,138 +689,55 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | |||
680 | sbus->buswidth = 1; | 689 | sbus->buswidth = 1; |
681 | } | 690 | } |
682 | 691 | ||
692 | /* Bytes transferred = tsize * src width, not MIN(buswidths) */ | ||
693 | max_bytes_per_lli = bd.srcbus.buswidth * | ||
694 | PL080_CONTROL_TRANSFER_SIZE_MASK; | ||
695 | |||
683 | /* | 696 | /* |
684 | * Make largest possible LLIs until less than one bus | 697 | * Make largest possible LLIs until less than one bus |
685 | * width left | 698 | * width left |
686 | */ | 699 | */ |
687 | while (bd.remainder > (mbus->buswidth - 1)) { | 700 | while (bd.remainder > (mbus->buswidth - 1)) { |
688 | size_t lli_len, target_len, tsize, odd_bytes; | 701 | size_t lli_len, tsize, width; |
689 | 702 | ||
690 | /* | 703 | /* |
691 | * If enough left try to send max possible, | 704 | * If enough left try to send max possible, |
692 | * otherwise try to send the remainder | 705 | * otherwise try to send the remainder |
693 | */ | 706 | */ |
694 | target_len = min(bd.remainder, max_bytes_per_lli); | 707 | lli_len = min(bd.remainder, max_bytes_per_lli); |
695 | 708 | ||
696 | /* | 709 | /* |
697 | * Set bus lengths for incrementing buses to the | 710 | * Check against maximum bus alignment: Calculate actual |
698 | * number of bytes which fill to next memory boundary, | 711 | * transfer size in relation to bus width and get a |
699 | * limiting on the target length calculated above. | 712 | * maximum remainder of the highest bus width - 1 |
700 | */ | 713 | */ |
701 | if (cctl & PL080_CONTROL_SRC_INCR) | 714 | width = max(mbus->buswidth, sbus->buswidth); |
702 | bd.srcbus.fill_bytes = | 715 | lli_len = (lli_len / width) * width; |
703 | pl08x_pre_boundary(bd.srcbus.addr, | 716 | tsize = lli_len / bd.srcbus.buswidth; |
704 | target_len); | ||
705 | else | ||
706 | bd.srcbus.fill_bytes = target_len; | ||
707 | |||
708 | if (cctl & PL080_CONTROL_DST_INCR) | ||
709 | bd.dstbus.fill_bytes = | ||
710 | pl08x_pre_boundary(bd.dstbus.addr, | ||
711 | target_len); | ||
712 | else | ||
713 | bd.dstbus.fill_bytes = target_len; | ||
714 | |||
715 | /* Find the nearest */ | ||
716 | lli_len = min(bd.srcbus.fill_bytes, | ||
717 | bd.dstbus.fill_bytes); | ||
718 | |||
719 | BUG_ON(lli_len > bd.remainder); | ||
720 | |||
721 | if (lli_len <= 0) { | ||
722 | dev_err(&pl08x->adev->dev, | ||
723 | "%s lli_len is %zu, <= 0\n", | ||
724 | __func__, lli_len); | ||
725 | return 0; | ||
726 | } | ||
727 | 717 | ||
728 | if (lli_len == target_len) { | 718 | dev_vdbg(&pl08x->adev->dev, |
729 | /* | 719 | "%s fill lli with single lli chunk of " |
730 | * Can send what we wanted. | 720 | "size 0x%08zx (remainder 0x%08zx)\n", |
731 | * Maintain alignment | 721 | __func__, lli_len, bd.remainder); |
732 | */ | 722 | |
733 | lli_len = (lli_len/mbus->buswidth) * | 723 | cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, |
734 | mbus->buswidth; | 724 | bd.dstbus.buswidth, tsize); |
735 | odd_bytes = 0; | 725 | pl08x_fill_lli_for_desc(&bd, num_llis++, lli_len, cctl); |
736 | } else { | 726 | total_bytes += lli_len; |
737 | /* | ||
738 | * So now we know how many bytes to transfer | ||
739 | * to get to the nearest boundary. The next | ||
740 | * LLI will past the boundary. However, we | ||
741 | * may be working to a boundary on the slave | ||
742 | * bus. We need to ensure the master stays | ||
743 | * aligned, and that we are working in | ||
744 | * multiples of the bus widths. | ||
745 | */ | ||
746 | odd_bytes = lli_len % mbus->buswidth; | ||
747 | lli_len -= odd_bytes; | ||
748 | |||
749 | } | ||
750 | |||
751 | if (lli_len) { | ||
752 | /* | ||
753 | * Check against minimum bus alignment: | ||
754 | * Calculate actual transfer size in relation | ||
755 | * to bus width an get a maximum remainder of | ||
756 | * the smallest bus width - 1 | ||
757 | */ | ||
758 | /* FIXME: use round_down()? */ | ||
759 | tsize = lli_len / min(mbus->buswidth, | ||
760 | sbus->buswidth); | ||
761 | lli_len = tsize * min(mbus->buswidth, | ||
762 | sbus->buswidth); | ||
763 | |||
764 | if (target_len != lli_len) { | ||
765 | dev_vdbg(&pl08x->adev->dev, | ||
766 | "%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n", | ||
767 | __func__, target_len, lli_len, txd->len); | ||
768 | } | ||
769 | |||
770 | cctl = pl08x_cctl_bits(cctl, | ||
771 | bd.srcbus.buswidth, | ||
772 | bd.dstbus.buswidth, | ||
773 | tsize); | ||
774 | |||
775 | dev_vdbg(&pl08x->adev->dev, | ||
776 | "%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n", | ||
777 | __func__, lli_len, bd.remainder); | ||
778 | pl08x_fill_lli_for_desc(&bd, num_llis++, | ||
779 | lli_len, cctl); | ||
780 | total_bytes += lli_len; | ||
781 | } | ||
782 | |||
783 | |||
784 | if (odd_bytes) { | ||
785 | /* | ||
786 | * Creep past the boundary, maintaining | ||
787 | * master alignment | ||
788 | */ | ||
789 | int j; | ||
790 | for (j = 0; (j < mbus->buswidth) | ||
791 | && (bd.remainder); j++) { | ||
792 | cctl = pl08x_cctl_bits(cctl, 1, 1, 1); | ||
793 | dev_vdbg(&pl08x->adev->dev, | ||
794 | "%s align with boundary, single byte (remain 0x%08zx)\n", | ||
795 | __func__, bd.remainder); | ||
796 | pl08x_fill_lli_for_desc(&bd, | ||
797 | num_llis++, 1, cctl); | ||
798 | total_bytes++; | ||
799 | } | ||
800 | } | ||
801 | } | 727 | } |
802 | 728 | ||
803 | /* | 729 | /* |
804 | * Send any odd bytes | 730 | * Send any odd bytes |
805 | */ | 731 | */ |
806 | while (bd.remainder) { | 732 | if (bd.remainder) { |
807 | cctl = pl08x_cctl_bits(cctl, 1, 1, 1); | ||
808 | dev_vdbg(&pl08x->adev->dev, | 733 | dev_vdbg(&pl08x->adev->dev, |
809 | "%s align with boundary, single odd byte (remain %zu)\n", | 734 | "%s align with boundary, send odd bytes (remain %zu)\n", |
810 | __func__, bd.remainder); | 735 | __func__, bd.remainder); |
811 | pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); | 736 | prep_byte_width_lli(&bd, &cctl, bd.remainder, |
812 | total_bytes++; | 737 | num_llis++, &total_bytes); |
813 | } | 738 | } |
814 | } | 739 | } |
740 | |||
815 | if (total_bytes != txd->len) { | 741 | if (total_bytes != txd->len) { |
816 | dev_err(&pl08x->adev->dev, | 742 | dev_err(&pl08x->adev->dev, |
817 | "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", | 743 | "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", |
@@ -917,9 +843,7 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan, | |||
917 | * need, but for slaves the physical signals may be muxed! | 843 | * need, but for slaves the physical signals may be muxed! |
918 | * Can the platform allow us to use this channel? | 844 | * Can the platform allow us to use this channel? |
919 | */ | 845 | */ |
920 | if (plchan->slave && | 846 | if (plchan->slave && pl08x->pd->get_signal) { |
921 | ch->signal < 0 && | ||
922 | pl08x->pd->get_signal) { | ||
923 | ret = pl08x->pd->get_signal(plchan); | 847 | ret = pl08x->pd->get_signal(plchan); |
924 | if (ret < 0) { | 848 | if (ret < 0) { |
925 | dev_dbg(&pl08x->adev->dev, | 849 | dev_dbg(&pl08x->adev->dev, |
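A short sketch of the effect of this hunk: the ch->signal < 0 test is dropped, so a slave channel now asks the platform for its request-line signal whenever a get_signal hook is provided.

        if (plchan->slave && pl08x->pd->get_signal) {
                ret = pl08x->pd->get_signal(plchan);
                if (ret < 0)
                        return -EBUSY;  /* assumption: request line is muxed away */
                /* ... continue with the returned signal, as in the existing code */
        }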
@@ -1008,10 +932,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( | |||
1008 | * If slaves are relying on interrupts to signal completion this function | 932 | * If slaves are relying on interrupts to signal completion this function |
1009 | * must not be called with interrupts disabled. | 933 | * must not be called with interrupts disabled. |
1010 | */ | 934 | */ |
1011 | static enum dma_status | 935 | static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, |
1012 | pl08x_dma_tx_status(struct dma_chan *chan, | 936 | dma_cookie_t cookie, struct dma_tx_state *txstate) |
1013 | dma_cookie_t cookie, | ||
1014 | struct dma_tx_state *txstate) | ||
1015 | { | 937 | { |
1016 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 938 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1017 | dma_cookie_t last_used; | 939 | dma_cookie_t last_used; |
@@ -1253,7 +1175,9 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, | |||
1253 | 1175 | ||
1254 | num_llis = pl08x_fill_llis_for_desc(pl08x, txd); | 1176 | num_llis = pl08x_fill_llis_for_desc(pl08x, txd); |
1255 | if (!num_llis) { | 1177 | if (!num_llis) { |
1256 | kfree(txd); | 1178 | spin_lock_irqsave(&plchan->lock, flags); |
1179 | pl08x_free_txd(pl08x, txd); | ||
1180 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1257 | return -EINVAL; | 1181 | return -EINVAL; |
1258 | } | 1182 | } |
1259 | 1183 | ||
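The fix above in isolation, assuming the surrounding function's locals: when LLI building fails, the descriptor is now released through the driver's own free helper, under the channel lock, instead of a bare kfree().

        num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
        if (!num_llis) {
                spin_lock_irqsave(&plchan->lock, flags);
                pl08x_free_txd(pl08x, txd);     /* also returns any pool-allocated LLIs */
                spin_unlock_irqrestore(&plchan->lock, flags);
                return -EINVAL;
        }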
@@ -1301,7 +1225,7 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, | |||
1301 | static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, | 1225 | static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, |
1302 | unsigned long flags) | 1226 | unsigned long flags) |
1303 | { | 1227 | { |
1304 | struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); | 1228 | struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); |
1305 | 1229 | ||
1306 | if (txd) { | 1230 | if (txd) { |
1307 | dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); | 1231 | dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); |
@@ -1367,7 +1291,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
1367 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1291 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1368 | struct pl08x_driver_data *pl08x = plchan->host; | 1292 | struct pl08x_driver_data *pl08x = plchan->host; |
1369 | struct pl08x_txd *txd; | 1293 | struct pl08x_txd *txd; |
1370 | int ret; | 1294 | int ret, tmp; |
1371 | 1295 | ||
1372 | /* | 1296 | /* |
1373 | * Current implementation ASSUMES only one sg | 1297 | * Current implementation ASSUMES only one sg |
@@ -1401,12 +1325,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
1401 | txd->len = sgl->length; | 1325 | txd->len = sgl->length; |
1402 | 1326 | ||
1403 | if (direction == DMA_TO_DEVICE) { | 1327 | if (direction == DMA_TO_DEVICE) { |
1404 | txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; | ||
1405 | txd->cctl = plchan->dst_cctl; | 1328 | txd->cctl = plchan->dst_cctl; |
1406 | txd->src_addr = sgl->dma_address; | 1329 | txd->src_addr = sgl->dma_address; |
1407 | txd->dst_addr = plchan->dst_addr; | 1330 | txd->dst_addr = plchan->dst_addr; |
1408 | } else if (direction == DMA_FROM_DEVICE) { | 1331 | } else if (direction == DMA_FROM_DEVICE) { |
1409 | txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; | ||
1410 | txd->cctl = plchan->src_cctl; | 1332 | txd->cctl = plchan->src_cctl; |
1411 | txd->src_addr = plchan->src_addr; | 1333 | txd->src_addr = plchan->src_addr; |
1412 | txd->dst_addr = sgl->dma_address; | 1334 | txd->dst_addr = sgl->dma_address; |
@@ -1416,6 +1338,15 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
1416 | return NULL; | 1338 | return NULL; |
1417 | } | 1339 | } |
1418 | 1340 | ||
1341 | if (plchan->cd->device_fc) | ||
1342 | tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER : | ||
1343 | PL080_FLOW_PER2MEM_PER; | ||
1344 | else | ||
1345 | tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER : | ||
1346 | PL080_FLOW_PER2MEM; | ||
1347 | |||
1348 | txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; | ||
1349 | |||
1419 | ret = pl08x_prep_channel_resources(plchan, txd); | 1350 | ret = pl08x_prep_channel_resources(plchan, txd); |
1420 | if (ret) | 1351 | if (ret) |
1421 | return NULL; | 1352 | return NULL; |
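A condensed sketch of the flow-control selection added here (the patch uses a local called tmp; fc below is just a clearer name): when the channel data marks the peripheral as flow controller (device_fc), the *_PER variants are programmed, otherwise the DMAC itself paces the transfer.

        u32 fc;

        if (plchan->cd->device_fc)
                fc = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
                                                    PL080_FLOW_PER2MEM_PER;
        else
                fc = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
                                                    PL080_FLOW_PER2MEM;

        txd->ccfg |= fc << PL080_CONFIG_FLOW_CONTROL_SHIFT;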
@@ -1507,13 +1438,7 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) | |||
1507 | */ | 1438 | */ |
1508 | static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) | 1439 | static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) |
1509 | { | 1440 | { |
1510 | u32 val; | 1441 | writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); |
1511 | |||
1512 | val = readl(pl08x->base + PL080_CONFIG); | ||
1513 | val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); | ||
1514 | /* We implicitly clear bit 1 and that means little-endian mode */ | ||
1515 | val |= PL080_CONFIG_ENABLE; | ||
1516 | writel(val, pl08x->base + PL080_CONFIG); | ||
1517 | } | 1442 | } |
1518 | 1443 | ||
1519 | static void pl08x_unmap_buffers(struct pl08x_txd *txd) | 1444 | static void pl08x_unmap_buffers(struct pl08x_txd *txd) |
@@ -1589,8 +1514,8 @@ static void pl08x_tasklet(unsigned long data) | |||
1589 | */ | 1514 | */ |
1590 | list_for_each_entry(waiting, &pl08x->memcpy.channels, | 1515 | list_for_each_entry(waiting, &pl08x->memcpy.channels, |
1591 | chan.device_node) { | 1516 | chan.device_node) { |
1592 | if (waiting->state == PL08X_CHAN_WAITING && | 1517 | if (waiting->state == PL08X_CHAN_WAITING && |
1593 | waiting->waiting != NULL) { | 1518 | waiting->waiting != NULL) { |
1594 | int ret; | 1519 | int ret; |
1595 | 1520 | ||
1596 | /* This should REALLY not fail now */ | 1521 | /* This should REALLY not fail now */ |
@@ -1630,38 +1555,40 @@ static void pl08x_tasklet(unsigned long data) | |||
1630 | static irqreturn_t pl08x_irq(int irq, void *dev) | 1555 | static irqreturn_t pl08x_irq(int irq, void *dev) |
1631 | { | 1556 | { |
1632 | struct pl08x_driver_data *pl08x = dev; | 1557 | struct pl08x_driver_data *pl08x = dev; |
1633 | u32 mask = 0; | 1558 | u32 mask = 0, err, tc, i; |
1634 | u32 val; | 1559 | |
1635 | int i; | 1560 | /* check & clear - ERR & TC interrupts */ |
1636 | 1561 | err = readl(pl08x->base + PL080_ERR_STATUS); | |
1637 | val = readl(pl08x->base + PL080_ERR_STATUS); | 1562 | if (err) { |
1638 | if (val) { | 1563 | dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n", |
1639 | /* An error interrupt (on one or more channels) */ | 1564 | __func__, err); |
1640 | dev_err(&pl08x->adev->dev, | 1565 | writel(err, pl08x->base + PL080_ERR_CLEAR); |
1641 | "%s error interrupt, register value 0x%08x\n", | ||
1642 | __func__, val); | ||
1643 | /* | ||
1644 | * Simply clear ALL PL08X error interrupts, | ||
1645 | * regardless of channel and cause | ||
1646 | * FIXME: should be 0x00000003 on PL081 really. | ||
1647 | */ | ||
1648 | writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); | ||
1649 | } | 1566 | } |
1650 | val = readl(pl08x->base + PL080_INT_STATUS); | 1567 | tc = readl(pl08x->base + PL080_INT_STATUS); |
1568 | if (tc) | ||
1569 | writel(tc, pl08x->base + PL080_TC_CLEAR); | ||
1570 | |||
1571 | if (!err && !tc) | ||
1572 | return IRQ_NONE; | ||
1573 | |||
1651 | for (i = 0; i < pl08x->vd->channels; i++) { | 1574 | for (i = 0; i < pl08x->vd->channels; i++) { |
1652 | if ((1 << i) & val) { | 1575 | if (((1 << i) & err) || ((1 << i) & tc)) { |
1653 | /* Locate physical channel */ | 1576 | /* Locate physical channel */ |
1654 | struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; | 1577 | struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; |
1655 | struct pl08x_dma_chan *plchan = phychan->serving; | 1578 | struct pl08x_dma_chan *plchan = phychan->serving; |
1656 | 1579 | ||
1580 | if (!plchan) { | ||
1581 | dev_err(&pl08x->adev->dev, | ||
1582 | "%s Error TC interrupt on unused channel: 0x%08x\n", | ||
1583 | __func__, i); | ||
1584 | continue; | ||
1585 | } | ||
1586 | |||
1657 | /* Schedule tasklet on this channel */ | 1587 | /* Schedule tasklet on this channel */ |
1658 | tasklet_schedule(&plchan->tasklet); | 1588 | tasklet_schedule(&plchan->tasklet); |
1659 | |||
1660 | mask |= (1 << i); | 1589 | mask |= (1 << i); |
1661 | } | 1590 | } |
1662 | } | 1591 | } |
1663 | /* Clear only the terminal interrupts on channels we processed */ | ||
1664 | writel(mask, pl08x->base + PL080_TC_CLEAR); | ||
1665 | 1592 | ||
1666 | return mask ? IRQ_HANDLED : IRQ_NONE; | 1593 | return mask ? IRQ_HANDLED : IRQ_NONE; |
1667 | } | 1594 | } |
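Putting the reworked handler together, a sketch of its new shape, assuming the driver's existing structures (the function name is changed here only to mark it as an illustration):

static irqreturn_t pl08x_irq_sketch(int irq, void *dev)
{
        struct pl08x_driver_data *pl08x = dev;
        u32 mask = 0, err, tc, i;

        /* read and clear error and terminal-count status up front */
        err = readl(pl08x->base + PL080_ERR_STATUS);
        if (err)
                writel(err, pl08x->base + PL080_ERR_CLEAR);

        tc = readl(pl08x->base + PL080_INT_STATUS);
        if (tc)
                writel(tc, pl08x->base + PL080_TC_CLEAR);

        if (!err && !tc)
                return IRQ_NONE;        /* this interrupt was not ours */

        for (i = 0; i < pl08x->vd->channels; i++) {
                if (((1 << i) & err) || ((1 << i) & tc)) {
                        struct pl08x_dma_chan *plchan =
                                pl08x->phy_chans[i].serving;

                        if (!plchan)
                                continue;       /* spurious IRQ on an unused channel */

                        tasklet_schedule(&plchan->tasklet);
                        mask |= 1 << i;
                }
        }

        return mask ? IRQ_HANDLED : IRQ_NONE;
}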
@@ -1685,9 +1612,7 @@ static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) | |||
1685 | * Make a local wrapper to hold required data | 1612 | * Make a local wrapper to hold required data |
1686 | */ | 1613 | */ |
1687 | static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | 1614 | static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, |
1688 | struct dma_device *dmadev, | 1615 | struct dma_device *dmadev, unsigned int channels, bool slave) |
1689 | unsigned int channels, | ||
1690 | bool slave) | ||
1691 | { | 1616 | { |
1692 | struct pl08x_dma_chan *chan; | 1617 | struct pl08x_dma_chan *chan; |
1693 | int i; | 1618 | int i; |
@@ -1700,7 +1625,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |||
1700 | * to cope with that situation. | 1625 | * to cope with that situation. |
1701 | */ | 1626 | */ |
1702 | for (i = 0; i < channels; i++) { | 1627 | for (i = 0; i < channels; i++) { |
1703 | chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL); | 1628 | chan = kzalloc(sizeof(*chan), GFP_KERNEL); |
1704 | if (!chan) { | 1629 | if (!chan) { |
1705 | dev_err(&pl08x->adev->dev, | 1630 | dev_err(&pl08x->adev->dev, |
1706 | "%s no memory for channel\n", __func__); | 1631 | "%s no memory for channel\n", __func__); |
@@ -1728,7 +1653,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |||
1728 | kfree(chan); | 1653 | kfree(chan); |
1729 | continue; | 1654 | continue; |
1730 | } | 1655 | } |
1731 | dev_info(&pl08x->adev->dev, | 1656 | dev_dbg(&pl08x->adev->dev, |
1732 | "initialize virtual channel \"%s\"\n", | 1657 | "initialize virtual channel \"%s\"\n", |
1733 | chan->name); | 1658 | chan->name); |
1734 | 1659 | ||
@@ -1837,9 +1762,9 @@ static const struct file_operations pl08x_debugfs_operations = { | |||
1837 | static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) | 1762 | static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) |
1838 | { | 1763 | { |
1839 | /* Expose a simple debugfs interface to view all clocks */ | 1764 | /* Expose a simple debugfs interface to view all clocks */ |
1840 | (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, | 1765 | (void) debugfs_create_file(dev_name(&pl08x->adev->dev), |
1841 | NULL, pl08x, | 1766 | S_IFREG | S_IRUGO, NULL, pl08x, |
1842 | &pl08x_debugfs_operations); | 1767 | &pl08x_debugfs_operations); |
1843 | } | 1768 | } |
1844 | 1769 | ||
1845 | #else | 1770 | #else |
@@ -1860,12 +1785,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
1860 | return ret; | 1785 | return ret; |
1861 | 1786 | ||
1862 | /* Create the driver state holder */ | 1787 | /* Create the driver state holder */ |
1863 | pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL); | 1788 | pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL); |
1864 | if (!pl08x) { | 1789 | if (!pl08x) { |
1865 | ret = -ENOMEM; | 1790 | ret = -ENOMEM; |
1866 | goto out_no_pl08x; | 1791 | goto out_no_pl08x; |
1867 | } | 1792 | } |
1868 | 1793 | ||
1794 | pm_runtime_set_active(&adev->dev); | ||
1795 | pm_runtime_enable(&adev->dev); | ||
1796 | |||
1869 | /* Initialize memcpy engine */ | 1797 | /* Initialize memcpy engine */ |
1870 | dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); | 1798 | dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); |
1871 | pl08x->memcpy.dev = &adev->dev; | 1799 | pl08x->memcpy.dev = &adev->dev; |
@@ -1939,7 +1867,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
1939 | } | 1867 | } |
1940 | 1868 | ||
1941 | /* Initialize physical channels */ | 1869 | /* Initialize physical channels */ |
1942 | pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)), | 1870 | pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)), |
1943 | GFP_KERNEL); | 1871 | GFP_KERNEL); |
1944 | if (!pl08x->phy_chans) { | 1872 | if (!pl08x->phy_chans) { |
1945 | dev_err(&adev->dev, "%s failed to allocate " | 1873 | dev_err(&adev->dev, "%s failed to allocate " |
@@ -1956,9 +1884,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
1956 | spin_lock_init(&ch->lock); | 1884 | spin_lock_init(&ch->lock); |
1957 | ch->serving = NULL; | 1885 | ch->serving = NULL; |
1958 | ch->signal = -1; | 1886 | ch->signal = -1; |
1959 | dev_info(&adev->dev, | 1887 | dev_dbg(&adev->dev, "physical channel %d is %s\n", |
1960 | "physical channel %d is %s\n", i, | 1888 | i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); |
1961 | pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); | ||
1962 | } | 1889 | } |
1963 | 1890 | ||
1964 | /* Register as many memcpy channels as there are physical channels */ | 1891 | /* Register as many memcpy channels as there are physical channels */ |
@@ -1974,8 +1901,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
1974 | 1901 | ||
1975 | /* Register slave channels */ | 1902 | /* Register slave channels */ |
1976 | ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, | 1903 | ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, |
1977 | pl08x->pd->num_slave_channels, | 1904 | pl08x->pd->num_slave_channels, true); |
1978 | true); | ||
1979 | if (ret <= 0) { | 1905 | if (ret <= 0) { |
1980 | dev_warn(&pl08x->adev->dev, | 1906 | dev_warn(&pl08x->adev->dev, |
1981 | "%s failed to enumerate slave channels - %d\n", | 1907 | "%s failed to enumerate slave channels - %d\n", |
@@ -2005,6 +1931,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
2005 | dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", | 1931 | dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", |
2006 | amba_part(adev), amba_rev(adev), | 1932 | amba_part(adev), amba_rev(adev), |
2007 | (unsigned long long)adev->res.start, adev->irq[0]); | 1933 | (unsigned long long)adev->res.start, adev->irq[0]); |
1934 | |||
1935 | pm_runtime_put(&adev->dev); | ||
2008 | return 0; | 1936 | return 0; |
2009 | 1937 | ||
2010 | out_no_slave_reg: | 1938 | out_no_slave_reg: |
@@ -2023,6 +1951,9 @@ out_no_ioremap: | |||
2023 | dma_pool_destroy(pl08x->pool); | 1951 | dma_pool_destroy(pl08x->pool); |
2024 | out_no_lli_pool: | 1952 | out_no_lli_pool: |
2025 | out_no_platdata: | 1953 | out_no_platdata: |
1954 | pm_runtime_put(&adev->dev); | ||
1955 | pm_runtime_disable(&adev->dev); | ||
1956 | |||
2026 | kfree(pl08x); | 1957 | kfree(pl08x); |
2027 | out_no_pl08x: | 1958 | out_no_pl08x: |
2028 | amba_release_regions(adev); | 1959 | amba_release_regions(adev); |
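Finally, a sketch of the probe-time runtime-PM sequence these last hunks add, with the unrelated probe work elided; pl08x_do_probe_work() is a hypothetical placeholder for the real probe body.

static int probe_sketch(struct amba_device *adev)
{
        int ret;

        pm_runtime_set_active(&adev->dev);      /* device is powered at probe time */
        pm_runtime_enable(&adev->dev);

        ret = pl08x_do_probe_work(adev);        /* placeholder for the real probe body */
        if (ret)
                goto err;

        pm_runtime_put(&adev->dev);             /* let the DMAC suspend when idle */
        return 0;

err:
        pm_runtime_put(&adev->dev);
        pm_runtime_disable(&adev->dev);
        return ret;
}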