about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorMark Brown <broonie@kernel.org>2015-05-22 09:15:16 -0400
committerMark Brown <broonie@kernel.org>2015-05-22 09:15:16 -0400
commite8a07d60c932efbd44bee50e3fa95f133b8c28be (patch)
treed9f0a79dc6d05effc21750f03a93b1125a771231
parentb723550d7e84b6b59d427d560be49d8ab177ea89 (diff)
parentd23c9a0a5c237210bccb82a2e9dc0d26e75920ee (diff)
Merge branch 'topic/rcar-dma' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into asoc-rcar
-rw-r--r--drivers/dma/sh/rcar-dmac.c37
1 file changed, 21 insertions, 16 deletions
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index a18d16cc4795..e0302c784ba4 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -465,6 +465,7 @@ static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
 static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
 {
 	struct rcar_dmac_desc_page *page;
+	unsigned long flags;
 	LIST_HEAD(list);
 	unsigned int i;
 
@@ -482,10 +483,10 @@ static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
 		list_add_tail(&desc->node, &list);
 	}
 
-	spin_lock_irq(&chan->lock);
+	spin_lock_irqsave(&chan->lock, flags);
 	list_splice_tail(&list, &chan->desc.free);
 	list_add_tail(&page->node, &chan->desc.pages);
-	spin_unlock_irq(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 
 	return 0;
 }
@@ -516,6 +517,7 @@ static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
 static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
 {
 	struct rcar_dmac_desc *desc, *_desc;
+	unsigned long flags;
 	LIST_HEAD(list);
 
 	/*
@@ -524,9 +526,9 @@ static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
 	 * list_for_each_entry_safe, isn't safe if we release the channel lock
 	 * around the rcar_dmac_desc_put() call.
 	 */
-	spin_lock_irq(&chan->lock);
+	spin_lock_irqsave(&chan->lock, flags);
 	list_splice_init(&chan->desc.wait, &list);
-	spin_unlock_irq(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 
 	list_for_each_entry_safe(desc, _desc, &list, node) {
 		if (async_tx_test_ack(&desc->async_tx)) {
@@ -539,9 +541,9 @@ static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
 		return;
 
 	/* Put the remaining descriptors back in the wait list. */
-	spin_lock_irq(&chan->lock);
+	spin_lock_irqsave(&chan->lock, flags);
 	list_splice(&list, &chan->desc.wait);
-	spin_unlock_irq(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /*
@@ -556,12 +558,13 @@ static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
 static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
 {
 	struct rcar_dmac_desc *desc;
+	unsigned long flags;
 	int ret;
 
 	/* Recycle acked descriptors before attempting allocation. */
 	rcar_dmac_desc_recycle_acked(chan);
 
-	spin_lock_irq(&chan->lock);
+	spin_lock_irqsave(&chan->lock, flags);
 
 	while (list_empty(&chan->desc.free)) {
 		/*
@@ -570,17 +573,17 @@ static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
 		 * allocated descriptors. If the allocation fails return an
 		 * error.
 		 */
-		spin_unlock_irq(&chan->lock);
+		spin_unlock_irqrestore(&chan->lock, flags);
 		ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
 		if (ret < 0)
 			return NULL;
-		spin_lock_irq(&chan->lock);
+		spin_lock_irqsave(&chan->lock, flags);
 	}
 
 	desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
 	list_del(&desc->node);
 
-	spin_unlock_irq(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 
 	return desc;
 }
@@ -593,6 +596,7 @@ static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
 static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
 {
 	struct rcar_dmac_desc_page *page;
+	unsigned long flags;
 	LIST_HEAD(list);
 	unsigned int i;
 
@@ -606,10 +610,10 @@ static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
 		list_add_tail(&chunk->node, &list);
 	}
 
-	spin_lock_irq(&chan->lock);
+	spin_lock_irqsave(&chan->lock, flags);
 	list_splice_tail(&list, &chan->desc.chunks_free);
 	list_add_tail(&page->node, &chan->desc.pages);
-	spin_unlock_irq(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 
 	return 0;
 }
@@ -627,9 +631,10 @@ static struct rcar_dmac_xfer_chunk *
 rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
 {
 	struct rcar_dmac_xfer_chunk *chunk;
+	unsigned long flags;
 	int ret;
 
-	spin_lock_irq(&chan->lock);
+	spin_lock_irqsave(&chan->lock, flags);
 
 	while (list_empty(&chan->desc.chunks_free)) {
 		/*
@@ -638,18 +643,18 @@ rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
 		 * allocated descriptors. If the allocation fails return an
 		 * error.
 		 */
-		spin_unlock_irq(&chan->lock);
+		spin_unlock_irqrestore(&chan->lock, flags);
 		ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
 		if (ret < 0)
 			return NULL;
-		spin_lock_irq(&chan->lock);
+		spin_lock_irqsave(&chan->lock, flags);
 	}
 
 	chunk = list_first_entry(&chan->desc.chunks_free,
 				 struct rcar_dmac_xfer_chunk, node);
 	list_del(&chunk->node);
 
-	spin_unlock_irq(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 
 	return chunk;
 }