author		Linus Torvalds <torvalds@linux-foundation.org>	2015-06-14 21:48:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-06-14 21:48:26 -0400
commit		b86a7563ca617aa49dfd6b836da4dd0351fe2acc (patch)
tree		29b6dc87f9a8f9cf2d1aee5e24c4f697240651a0
parent		dc75117fa28af52220cf31ea18493684cf0ffc35 (diff)
parent		88d04643c66052a1cf92a6fd5f92dff0f7757f61 (diff)
Merge branch 'fixes' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine fixes from Vinod Koul:
 "Here is hopefully the last set of fixes for 4.1. This time we have:

   - pause capability reporting fix: advertise pause only when both
     dmaengine pause and resume are supported, by Krzysztof

   - locking fix for at_xdmac by Ludovic

   - slave configuration fix for at_xdmac by Ludovic"

* 'fixes' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: Fix choppy sound because of unimplemented resume
  dmaengine: at_xdmac: rework slave configuration part
  dmaengine: at_xdmac: lock fixes
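The first item is the dmaengine core change at the bottom of this diff: dma_get_slave_caps() now reports cmd_pause only when a driver provides both device_pause and device_resume. at_xdmac implements pause but not resume, so a client that trusted the old flag could pause a channel and never restart it, which is what caused the choppy audio. A minimal, hedged sketch of the client-side pattern this capability is meant for (the my_client_* helpers are illustrative, not part of this commit):

/* Hedged sketch: gate pause/resume on the advertised capability. */
static bool my_client_can_pause(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;

	/* With this fix, cmd_pause means pause AND resume both work. */
	return caps.cmd_pause;
}

static int my_client_set_running(struct dma_chan *chan, bool run)
{
	/* Only called when my_client_can_pause() returned true. */
	return run ? dmaengine_resume(chan) : dmaengine_pause(chan);
}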
-rw-r--r--	drivers/dma/at_xdmac.c	| 231
-rw-r--r--	drivers/dma/dmaengine.c	|   6
2 files changed, 144 insertions(+), 93 deletions(-)
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 933e4b338459..7992164ea9ec 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -174,6 +174,8 @@
 #define AT_XDMAC_MBR_UBC_NDV3		(0x3 << 27)	/* Next Descriptor View 3 */
 
 #define AT_XDMAC_MAX_CHAN	0x20
+#define AT_XDMAC_MAX_CSIZE	16	/* 16 data */
+#define AT_XDMAC_MAX_DWIDTH	8	/* 64 bits */
 
 #define AT_XDMAC_DMA_BUSWIDTHS\
 	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
@@ -192,20 +194,17 @@ struct at_xdmac_chan {
 	struct dma_chan		chan;
 	void __iomem		*ch_regs;
 	u32			mask;		/* Channel Mask */
-	u32			cfg[2];		/* Channel Configuration Register */
-#define	AT_XDMAC_DEV_TO_MEM_CFG	0	/* Predifined dev to mem channel conf */
-#define	AT_XDMAC_MEM_TO_DEV_CFG	1	/* Predifined mem to dev channel conf */
+	u32			cfg;		/* Channel Configuration Register */
 	u8			perid;		/* Peripheral ID */
 	u8			perif;		/* Peripheral Interface */
 	u8			memif;		/* Memory Interface */
-	u32			per_src_addr;
-	u32			per_dst_addr;
 	u32			save_cc;
 	u32			save_cim;
 	u32			save_cnda;
 	u32			save_cndc;
 	unsigned long		status;
 	struct tasklet_struct	tasklet;
+	struct dma_slave_config	sconfig;
 
 	spinlock_t		lock;
 
@@ -415,8 +414,9 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct at_xdmac_desc	*desc = txd_to_at_desc(tx);
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(tx->chan);
 	dma_cookie_t		cookie;
+	unsigned long		irqflags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, irqflags);
 	cookie = dma_cookie_assign(tx);
 
 	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
@@ -425,7 +425,7 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (list_is_singular(&atchan->xfers_list))
 		at_xdmac_start_xfer(atchan, desc);
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, irqflags);
 	return cookie;
 }
 
@@ -494,61 +494,94 @@ static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
 	return chan;
 }
 
+static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
+				      enum dma_transfer_direction direction)
+{
+	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	int			csize, dwidth;
+
+	if (direction == DMA_DEV_TO_MEM) {
+		atchan->cfg =
+			AT91_XDMAC_DT_PERID(atchan->perid)
+			| AT_XDMAC_CC_DAM_INCREMENTED_AM
+			| AT_XDMAC_CC_SAM_FIXED_AM
+			| AT_XDMAC_CC_DIF(atchan->memif)
+			| AT_XDMAC_CC_SIF(atchan->perif)
+			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+			| AT_XDMAC_CC_DSYNC_PER2MEM
+			| AT_XDMAC_CC_MBSIZE_SIXTEEN
+			| AT_XDMAC_CC_TYPE_PER_TRAN;
+		csize = ffs(atchan->sconfig.src_maxburst) - 1;
+		if (csize < 0) {
+			dev_err(chan2dev(chan), "invalid src maxburst value\n");
+			return -EINVAL;
+		}
+		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
+		dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
+		if (dwidth < 0) {
+			dev_err(chan2dev(chan), "invalid src addr width value\n");
+			return -EINVAL;
+		}
+		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
+	} else if (direction == DMA_MEM_TO_DEV) {
+		atchan->cfg =
+			AT91_XDMAC_DT_PERID(atchan->perid)
+			| AT_XDMAC_CC_DAM_FIXED_AM
+			| AT_XDMAC_CC_SAM_INCREMENTED_AM
+			| AT_XDMAC_CC_DIF(atchan->perif)
+			| AT_XDMAC_CC_SIF(atchan->memif)
+			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+			| AT_XDMAC_CC_DSYNC_MEM2PER
+			| AT_XDMAC_CC_MBSIZE_SIXTEEN
+			| AT_XDMAC_CC_TYPE_PER_TRAN;
+		csize = ffs(atchan->sconfig.dst_maxburst) - 1;
+		if (csize < 0) {
+			dev_err(chan2dev(chan), "invalid src maxburst value\n");
+			return -EINVAL;
+		}
+		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
+		dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
+		if (dwidth < 0) {
+			dev_err(chan2dev(chan), "invalid dst addr width value\n");
+			return -EINVAL;
+		}
+		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
+	}
+
+	dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);
+
+	return 0;
+}
+
+/*
+ * Only check that maxburst and addr width values are supported by the
+ * the controller but not that the configuration is good to perform the
+ * transfer since we don't know the direction at this stage.
+ */
+static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
+{
+	if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
+	    || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
+		return -EINVAL;
+
+	if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
+	    || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
+		return -EINVAL;
+
+	return 0;
+}
+
 static int at_xdmac_set_slave_config(struct dma_chan *chan,
 				      struct dma_slave_config *sconfig)
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
-	u8 dwidth;
-	int csize;
 
-	atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] =
-		AT91_XDMAC_DT_PERID(atchan->perid)
-		| AT_XDMAC_CC_DAM_INCREMENTED_AM
-		| AT_XDMAC_CC_SAM_FIXED_AM
-		| AT_XDMAC_CC_DIF(atchan->memif)
-		| AT_XDMAC_CC_SIF(atchan->perif)
-		| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
-		| AT_XDMAC_CC_DSYNC_PER2MEM
-		| AT_XDMAC_CC_MBSIZE_SIXTEEN
-		| AT_XDMAC_CC_TYPE_PER_TRAN;
-	csize = at_xdmac_csize(sconfig->src_maxburst);
-	if (csize < 0) {
-		dev_err(chan2dev(chan), "invalid src maxburst value\n");
+	if (at_xdmac_check_slave_config(sconfig)) {
+		dev_err(chan2dev(chan), "invalid slave configuration\n");
 		return -EINVAL;
 	}
-	atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize);
-	dwidth = ffs(sconfig->src_addr_width) - 1;
-	atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
-
-
-	atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] =
-		AT91_XDMAC_DT_PERID(atchan->perid)
-		| AT_XDMAC_CC_DAM_FIXED_AM
-		| AT_XDMAC_CC_SAM_INCREMENTED_AM
-		| AT_XDMAC_CC_DIF(atchan->perif)
-		| AT_XDMAC_CC_SIF(atchan->memif)
-		| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
-		| AT_XDMAC_CC_DSYNC_MEM2PER
-		| AT_XDMAC_CC_MBSIZE_SIXTEEN
-		| AT_XDMAC_CC_TYPE_PER_TRAN;
-	csize = at_xdmac_csize(sconfig->dst_maxburst);
-	if (csize < 0) {
-		dev_err(chan2dev(chan), "invalid src maxburst value\n");
-		return -EINVAL;
-	}
-	atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize);
-	dwidth = ffs(sconfig->dst_addr_width) - 1;
-	atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
-
-	/* Src and dst addr are needed to configure the link list descriptor. */
-	atchan->per_src_addr = sconfig->src_addr;
-	atchan->per_dst_addr = sconfig->dst_addr;
 
-	dev_dbg(chan2dev(chan),
-		"%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n",
-		__func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG],
-		atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG],
-		atchan->per_src_addr, atchan->per_dst_addr);
+	memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));
 
 	return 0;
 }
@@ -563,6 +596,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct scatterlist	*sg;
 	int			i;
 	unsigned int		xfer_size = 0;
+	unsigned long		irqflags;
+	struct dma_async_tx_descriptor	*ret = NULL;
 
 	if (!sgl)
 		return NULL;
@@ -578,7 +613,10 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		 flags);
 
 	/* Protect dma_sconfig field that can be modified by set_slave_conf. */
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, irqflags);
+
+	if (at_xdmac_compute_chan_conf(chan, direction))
+		goto spin_unlock;
 
 	/* Prepare descriptors. */
 	for_each_sg(sgl, sg, sg_len, i) {
@@ -589,8 +627,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		mem = sg_dma_address(sg);
 		if (unlikely(!len)) {
 			dev_err(chan2dev(chan), "sg data length is zero\n");
-			spin_unlock_bh(&atchan->lock);
-			return NULL;
+			goto spin_unlock;
 		}
 		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
 			__func__, i, len, mem);
@@ -600,20 +637,18 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
 				list_splice_init(&first->descs_list, &atchan->free_descs_list);
-			spin_unlock_bh(&atchan->lock);
-			return NULL;
+			goto spin_unlock;
 		}
 
 		/* Linked list descriptor setup. */
 		if (direction == DMA_DEV_TO_MEM) {
-			desc->lld.mbr_sa = atchan->per_src_addr;
+			desc->lld.mbr_sa = atchan->sconfig.src_addr;
 			desc->lld.mbr_da = mem;
-			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
 		} else {
 			desc->lld.mbr_sa = mem;
-			desc->lld.mbr_da = atchan->per_dst_addr;
-			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+			desc->lld.mbr_da = atchan->sconfig.dst_addr;
 		}
+		desc->lld.mbr_cfg = atchan->cfg;
 		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
 		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
 			       ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
@@ -645,13 +680,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		xfer_size += len;
 	}
 
-	spin_unlock_bh(&atchan->lock);
 
 	first->tx_dma_desc.flags = flags;
 	first->xfer_size = xfer_size;
 	first->direction = direction;
+	ret = &first->tx_dma_desc;
 
-	return &first->tx_dma_desc;
+spin_unlock:
+	spin_unlock_irqrestore(&atchan->lock, irqflags);
+	return ret;
 }
 
 static struct dma_async_tx_descriptor *
@@ -664,6 +701,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 	struct at_xdmac_desc	*first = NULL, *prev = NULL;
 	unsigned int		periods = buf_len / period_len;
 	int			i;
+	unsigned long		irqflags;
 
 	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
 		__func__, &buf_addr, buf_len, period_len,
@@ -679,32 +717,34 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 		return NULL;
 	}
 
+	if (at_xdmac_compute_chan_conf(chan, direction))
+		return NULL;
+
 	for (i = 0; i < periods; i++) {
 		struct at_xdmac_desc	*desc = NULL;
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, irqflags);
 		desc = at_xdmac_get_desc(atchan);
 		if (!desc) {
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
 				list_splice_init(&first->descs_list, &atchan->free_descs_list);
-			spin_unlock_bh(&atchan->lock);
+			spin_unlock_irqrestore(&atchan->lock, irqflags);
 			return NULL;
 		}
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, irqflags);
 		dev_dbg(chan2dev(chan),
 			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
 			__func__, desc, &desc->tx_dma_desc.phys);
 
 		if (direction == DMA_DEV_TO_MEM) {
-			desc->lld.mbr_sa = atchan->per_src_addr;
+			desc->lld.mbr_sa = atchan->sconfig.src_addr;
 			desc->lld.mbr_da = buf_addr + i * period_len;
-			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
 		} else {
 			desc->lld.mbr_sa = buf_addr + i * period_len;
-			desc->lld.mbr_da = atchan->per_dst_addr;
-			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+			desc->lld.mbr_da = atchan->sconfig.dst_addr;
 		}
+		desc->lld.mbr_cfg = atchan->cfg;
 		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
 			| AT_XDMAC_MBR_UBC_NDEN
 			| AT_XDMAC_MBR_UBC_NSEN
@@ -766,6 +806,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 					 | AT_XDMAC_CC_SIF(0)
 					 | AT_XDMAC_CC_MBSIZE_SIXTEEN
 					 | AT_XDMAC_CC_TYPE_MEM_TRAN;
+	unsigned long		irqflags;
 
 	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
 		__func__, &src, &dest, len, flags);
@@ -798,9 +839,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 
 		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, irqflags);
 		desc = at_xdmac_get_desc(atchan);
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, irqflags);
 		if (!desc) {
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
@@ -886,6 +927,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	int			residue;
 	u32			cur_nda, mask, value;
 	u8			dwidth = 0;
+	unsigned long		flags;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_COMPLETE)
@@ -894,7 +936,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	if (!txstate)
 		return ret;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
 
@@ -904,8 +946,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	 */
 	if (!desc->active_xfer) {
 		dma_set_residue(txstate, desc->xfer_size);
-		spin_unlock_bh(&atchan->lock);
-		return ret;
+		goto spin_unlock;
 	}
 
 	residue = desc->xfer_size;
@@ -936,14 +977,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	}
 	residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
 
-	spin_unlock_bh(&atchan->lock);
-
 	dma_set_residue(txstate, residue);
 
 	dev_dbg(chan2dev(chan),
 		 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
 		 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
 
+spin_unlock:
+	spin_unlock_irqrestore(&atchan->lock, flags);
 	return ret;
 }
 
@@ -964,8 +1005,9 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
 static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 {
 	struct at_xdmac_desc	*desc;
+	unsigned long		flags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	/*
 	 * If channel is enabled, do nothing, advance_work will be triggered
@@ -980,7 +1022,7 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 		at_xdmac_start_xfer(atchan, desc);
 	}
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1116,12 +1158,13 @@ static int at_xdmac_device_config(struct dma_chan *chan,
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	int ret;
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	ret = at_xdmac_set_slave_config(chan, config);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return ret;
 }
@@ -1130,18 +1173,19 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
 	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
 		return 0;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
 	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
 	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
 		cpu_relax();
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
@@ -1150,18 +1194,19 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	if (!at_xdmac_chan_is_paused(atchan)) {
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 		return 0;
 	}
 
 	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
 	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
@@ -1171,10 +1216,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
 	struct at_xdmac_desc	*desc, *_desc;
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
 	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
 		cpu_relax();
@@ -1184,7 +1230,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
 		at_xdmac_remove_xfer(atchan, desc);
 
 	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
@@ -1194,8 +1240,9 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac_desc	*desc;
 	int			i;
+	unsigned long		flags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	if (at_xdmac_chan_is_enabled(atchan)) {
 		dev_err(chan2dev(chan),
@@ -1226,7 +1273,7 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
 	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
 
 spin_unlock:
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 	return i;
 }
 
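With the rework above, at_xdmac_set_slave_config() only validates and stores the dma_slave_config (the transfer direction is unknown at that point), and at_xdmac_compute_chan_conf() derives the direction-specific CC register value when a slave or cyclic transfer is prepared. A hedged client-side sketch of that flow (the function name, FIFO address and scatterlist arguments are placeholders, not part of this commit):

/* Hedged sketch: configure once, then prepare a mem-to-dev transfer. */
static int my_client_start_tx(struct dma_chan *chan, struct scatterlist *sgl,
			      unsigned int sg_len, dma_addr_t tx_fifo_phys)
{
	struct dma_slave_config cfg = {
		.dst_addr	= tx_fifo_phys,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		.dst_maxburst	= 1,
	};
	struct dma_async_tx_descriptor *txd;

	/* Checked and stored by at_xdmac_set_slave_config(); no direction yet... */
	if (dmaengine_slave_config(chan, &cfg))
		return -EINVAL;

	/* ...the CC value is computed here by at_xdmac_compute_chan_conf(). */
	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}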
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 2890d744bb1b..3ddfd1f6c23c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -487,7 +487,11 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 	caps->directions = device->directions;
 	caps->residue_granularity = device->residue_granularity;
 
-	caps->cmd_pause = !!device->device_pause;
+	/*
+	 * Some devices implement only pause (e.g. to get residuum) but no
+	 * resume. However cmd_pause is advertised as pause AND resume.
+	 */
+	caps->cmd_pause = !!(device->device_pause && device->device_resume);
 	caps->cmd_terminate = !!device->device_terminate_all;
 
 	return 0;