aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTakashi Iwai <tiwai@suse.de>2014-08-29 09:32:29 -0400
committerTakashi Iwai <tiwai@suse.de>2014-09-03 08:04:08 -0400
commit257f8cce5d40b811d229ed71602882baa0012808 (patch)
tree190a239257c625539a1b157acb3bd0075cdb61dc
parent52addcf9d6669fa439387610bc65c92fa0980cef (diff)
ALSA: pcm: Allow nonatomic trigger operations
Currently, many PCM operations are performed in a critical section protected by spinlock, typically the trigger and pointer callbacks are assumed to be atomic. This is basically because some trigger action (e.g. PCM stop after drain or xrun) is done in the interrupt handler. If a driver runs in a threaded irq, however, this doesn't have to be atomic. And many devices want to handle trigger in a non-atomic context due to lengthy communications. This patch tries to make all PCM calls operational in a non-atomic context. What it does is very simple: replaces the substream spinlock with the corresponding substream mutex when pcm->nonatomic flag is set. The driver that wants to use the non-atomic PCM ops just needs to set the flag and keep the rest as is. (Of course, it must not handle any PCM ops in irq context.) Note that the code doesn't check whether it's atomic-safe or not, but trust in 100% that the driver sets pcm->nonatomic correctly. One possible problem is the case where linked PCM substreams have inconsistent nonatomic states. To avoid this, snd_pcm_link() returns an error if one tries to link an inconsistent PCM substream. Signed-off-by: Takashi Iwai <tiwai@suse.de>
-rw-r--r--include/sound/pcm.h58
-rw-r--r--sound/core/pcm.c1
-rw-r--r--sound/core/pcm_native.c76
3 files changed, 116 insertions, 19 deletions
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 6f3e10ca0e32..bc79962f4aa6 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -365,6 +365,7 @@ struct snd_pcm_runtime {
365 365
366struct snd_pcm_group { /* keep linked substreams */ 366struct snd_pcm_group { /* keep linked substreams */
367 spinlock_t lock; 367 spinlock_t lock;
368 struct mutex mutex;
368 struct list_head substreams; 369 struct list_head substreams;
369 int count; 370 int count;
370}; 371};
@@ -460,6 +461,7 @@ struct snd_pcm {
460 void (*private_free) (struct snd_pcm *pcm); 461 void (*private_free) (struct snd_pcm *pcm);
461 struct device *dev; /* actual hw device this belongs to */ 462 struct device *dev; /* actual hw device this belongs to */
462 bool internal; /* pcm is for internal use only */ 463 bool internal; /* pcm is for internal use only */
464 bool nonatomic; /* whole PCM operations are in non-atomic context */
463#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE) 465#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
464 struct snd_pcm_oss oss; 466 struct snd_pcm_oss oss;
465#endif 467#endif
@@ -493,6 +495,7 @@ int snd_pcm_notify(struct snd_pcm_notify *notify, int nfree);
493 */ 495 */
494 496
495extern rwlock_t snd_pcm_link_rwlock; 497extern rwlock_t snd_pcm_link_rwlock;
498extern struct rw_semaphore snd_pcm_link_rwsem;
496 499
497int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info); 500int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info);
498int snd_pcm_info_user(struct snd_pcm_substream *substream, 501int snd_pcm_info_user(struct snd_pcm_substream *substream,
@@ -539,38 +542,69 @@ static inline int snd_pcm_stream_linked(struct snd_pcm_substream *substream)
539 542
540static inline void snd_pcm_stream_lock(struct snd_pcm_substream *substream) 543static inline void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
541{ 544{
542 read_lock(&snd_pcm_link_rwlock); 545 if (substream->pcm->nonatomic) {
543 spin_lock(&substream->self_group.lock); 546 down_read(&snd_pcm_link_rwsem);
547 mutex_lock(&substream->self_group.mutex);
548 } else {
549 read_lock(&snd_pcm_link_rwlock);
550 spin_lock(&substream->self_group.lock);
551 }
544} 552}
545 553
546static inline void snd_pcm_stream_unlock(struct snd_pcm_substream *substream) 554static inline void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
547{ 555{
548 spin_unlock(&substream->self_group.lock); 556 if (substream->pcm->nonatomic) {
549 read_unlock(&snd_pcm_link_rwlock); 557 mutex_unlock(&substream->self_group.mutex);
558 up_read(&snd_pcm_link_rwsem);
559 } else {
560 spin_unlock(&substream->self_group.lock);
561 read_unlock(&snd_pcm_link_rwlock);
562 }
550} 563}
551 564
552static inline void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream) 565static inline void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
553{ 566{
554 read_lock_irq(&snd_pcm_link_rwlock); 567 if (substream->pcm->nonatomic) {
555 spin_lock(&substream->self_group.lock); 568 down_read(&snd_pcm_link_rwsem);
569 mutex_lock(&substream->self_group.mutex);
570 } else {
571 read_lock_irq(&snd_pcm_link_rwlock);
572 spin_lock(&substream->self_group.lock);
573 }
556} 574}
557 575
558static inline void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream) 576static inline void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
559{ 577{
560 spin_unlock(&substream->self_group.lock); 578 if (substream->pcm->nonatomic) {
561 read_unlock_irq(&snd_pcm_link_rwlock); 579 mutex_unlock(&substream->self_group.mutex);
580 up_read(&snd_pcm_link_rwsem);
581 } else {
582 spin_unlock(&substream->self_group.lock);
583 read_unlock_irq(&snd_pcm_link_rwlock);
584 }
562} 585}
563 586
564#define snd_pcm_stream_lock_irqsave(substream, flags) \ 587#define snd_pcm_stream_lock_irqsave(substream, flags) \
565do { \ 588do { \
566 read_lock_irqsave(&snd_pcm_link_rwlock, (flags)); \ 589 if ((substream)->pcm->nonatomic) { \
567 spin_lock(&substream->self_group.lock); \ 590 (flags) = 0; /* XXX for avoid warning */ \
591 down_read(&snd_pcm_link_rwsem); \
592 mutex_lock(&(substream)->self_group.mutex); \
593 } else { \
594 read_lock_irqsave(&snd_pcm_link_rwlock, (flags)); \
595 spin_lock(&(substream)->self_group.lock); \
596 } \
568} while (0) 597} while (0)
569 598
570#define snd_pcm_stream_unlock_irqrestore(substream, flags) \ 599#define snd_pcm_stream_unlock_irqrestore(substream, flags) \
571do { \ 600do { \
572 spin_unlock(&substream->self_group.lock); \ 601 if ((substream)->pcm->nonatomic) { \
573 read_unlock_irqrestore(&snd_pcm_link_rwlock, (flags)); \ 602 mutex_unlock(&(substream)->self_group.mutex); \
603 up_read(&snd_pcm_link_rwsem); \
604 } else { \
605 spin_unlock(&(substream)->self_group.lock); \
606 read_unlock_irqrestore(&snd_pcm_link_rwlock, (flags)); \
607 } \
574} while (0) 608} while (0)
575 609
576#define snd_pcm_group_for_each_entry(s, substream) \ 610#define snd_pcm_group_for_each_entry(s, substream) \
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 43932e8dce66..afccdc553ef9 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -698,6 +698,7 @@ int snd_pcm_new_stream(struct snd_pcm *pcm, int stream, int substream_count)
698 } 698 }
699 substream->group = &substream->self_group; 699 substream->group = &substream->self_group;
700 spin_lock_init(&substream->self_group.lock); 700 spin_lock_init(&substream->self_group.lock);
701 mutex_init(&substream->self_group.mutex);
701 INIT_LIST_HEAD(&substream->self_group.substreams); 702 INIT_LIST_HEAD(&substream->self_group.substreams);
702 list_add_tail(&substream->link_list, &substream->self_group.substreams); 703 list_add_tail(&substream->link_list, &substream->self_group.substreams);
703 atomic_set(&substream->mmap_count, 0); 704 atomic_set(&substream->mmap_count, 0);
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 8cd2f930ad0b..16d9b7e15f8b 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -77,7 +77,8 @@ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
77DEFINE_RWLOCK(snd_pcm_link_rwlock); 77DEFINE_RWLOCK(snd_pcm_link_rwlock);
78EXPORT_SYMBOL(snd_pcm_link_rwlock); 78EXPORT_SYMBOL(snd_pcm_link_rwlock);
79 79
80static DECLARE_RWSEM(snd_pcm_link_rwsem); 80DECLARE_RWSEM(snd_pcm_link_rwsem);
81EXPORT_SYMBOL(snd_pcm_link_rwsem);
81 82
82static inline mm_segment_t snd_enter_user(void) 83static inline mm_segment_t snd_enter_user(void)
83{ 84{
@@ -727,9 +728,14 @@ static int snd_pcm_action_group(struct action_ops *ops,
727 int res = 0; 728 int res = 0;
728 729
729 snd_pcm_group_for_each_entry(s, substream) { 730 snd_pcm_group_for_each_entry(s, substream) {
730 if (do_lock && s != substream) 731 if (do_lock && s != substream) {
731 spin_lock_nested(&s->self_group.lock, 732 if (s->pcm->nonatomic)
732 SINGLE_DEPTH_NESTING); 733 mutex_lock_nested(&s->self_group.mutex,
734 SINGLE_DEPTH_NESTING);
735 else
736 spin_lock_nested(&s->self_group.lock,
737 SINGLE_DEPTH_NESTING);
738 }
733 res = ops->pre_action(s, state); 739 res = ops->pre_action(s, state);
734 if (res < 0) 740 if (res < 0)
735 goto _unlock; 741 goto _unlock;
@@ -755,8 +761,12 @@ static int snd_pcm_action_group(struct action_ops *ops,
755 if (do_lock) { 761 if (do_lock) {
756 /* unlock streams */ 762 /* unlock streams */
757 snd_pcm_group_for_each_entry(s1, substream) { 763 snd_pcm_group_for_each_entry(s1, substream) {
758 if (s1 != substream) 764 if (s1 != substream) {
759 spin_unlock(&s1->self_group.lock); 765 if (s1->pcm->nonatomic)
766 mutex_unlock(&s1->self_group.mutex);
767 else
768 spin_unlock(&s1->self_group.lock);
769 }
760 if (s1 == s) /* end */ 770 if (s1 == s) /* end */
761 break; 771 break;
762 } 772 }
@@ -784,6 +794,27 @@ static int snd_pcm_action_single(struct action_ops *ops,
784 return res; 794 return res;
785} 795}
786 796
797/* call in mutex-protected context */
798static int snd_pcm_action_mutex(struct action_ops *ops,
799 struct snd_pcm_substream *substream,
800 int state)
801{
802 int res;
803
804 if (snd_pcm_stream_linked(substream)) {
805 if (!mutex_trylock(&substream->group->mutex)) {
806 mutex_unlock(&substream->self_group.mutex);
807 mutex_lock(&substream->group->mutex);
808 mutex_lock(&substream->self_group.mutex);
809 }
810 res = snd_pcm_action_group(ops, substream, state, 1);
811 mutex_unlock(&substream->group->mutex);
812 } else {
813 res = snd_pcm_action_single(ops, substream, state);
814 }
815 return res;
816}
817
787/* 818/*
788 * Note: call with stream lock 819 * Note: call with stream lock
789 */ 820 */
@@ -793,6 +824,9 @@ static int snd_pcm_action(struct action_ops *ops,
793{ 824{
794 int res; 825 int res;
795 826
827 if (substream->pcm->nonatomic)
828 return snd_pcm_action_mutex(ops, substream, state);
829
796 if (snd_pcm_stream_linked(substream)) { 830 if (snd_pcm_stream_linked(substream)) {
797 if (!spin_trylock(&substream->group->lock)) { 831 if (!spin_trylock(&substream->group->lock)) {
798 spin_unlock(&substream->self_group.lock); 832 spin_unlock(&substream->self_group.lock);
@@ -807,6 +841,29 @@ static int snd_pcm_action(struct action_ops *ops,
807 return res; 841 return res;
808} 842}
809 843
844static int snd_pcm_action_lock_mutex(struct action_ops *ops,
845 struct snd_pcm_substream *substream,
846 int state)
847{
848 int res;
849
850 down_read(&snd_pcm_link_rwsem);
851 if (snd_pcm_stream_linked(substream)) {
852 mutex_lock(&substream->group->mutex);
853 mutex_lock_nested(&substream->self_group.mutex,
854 SINGLE_DEPTH_NESTING);
855 res = snd_pcm_action_group(ops, substream, state, 1);
856 mutex_unlock(&substream->self_group.mutex);
857 mutex_unlock(&substream->group->mutex);
858 } else {
859 mutex_lock(&substream->self_group.mutex);
860 res = snd_pcm_action_single(ops, substream, state);
861 mutex_unlock(&substream->self_group.mutex);
862 }
863 up_read(&snd_pcm_link_rwsem);
864 return res;
865}
866
810/* 867/*
811 * Note: don't use any locks before 868 * Note: don't use any locks before
812 */ 869 */
@@ -816,6 +873,9 @@ static int snd_pcm_action_lock_irq(struct action_ops *ops,
816{ 873{
817 int res; 874 int res;
818 875
876 if (substream->pcm->nonatomic)
877 return snd_pcm_action_lock_mutex(ops, substream, state);
878
819 read_lock_irq(&snd_pcm_link_rwlock); 879 read_lock_irq(&snd_pcm_link_rwlock);
820 if (snd_pcm_stream_linked(substream)) { 880 if (snd_pcm_stream_linked(substream)) {
821 spin_lock(&substream->group->lock); 881 spin_lock(&substream->group->lock);
@@ -1634,7 +1694,8 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
1634 down_write(&snd_pcm_link_rwsem); 1694 down_write(&snd_pcm_link_rwsem);
1635 write_lock_irq(&snd_pcm_link_rwlock); 1695 write_lock_irq(&snd_pcm_link_rwlock);
1636 if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN || 1696 if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1637 substream->runtime->status->state != substream1->runtime->status->state) { 1697 substream->runtime->status->state != substream1->runtime->status->state ||
1698 substream->pcm->nonatomic != substream1->pcm->nonatomic) {
1638 res = -EBADFD; 1699 res = -EBADFD;
1639 goto _end; 1700 goto _end;
1640 } 1701 }
@@ -1646,6 +1707,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
1646 substream->group = group; 1707 substream->group = group;
1647 group = NULL; 1708 group = NULL;
1648 spin_lock_init(&substream->group->lock); 1709 spin_lock_init(&substream->group->lock);
1710 mutex_init(&substream->group->mutex);
1649 INIT_LIST_HEAD(&substream->group->substreams); 1711 INIT_LIST_HEAD(&substream->group->substreams);
1650 list_add_tail(&substream->link_list, &substream->group->substreams); 1712 list_add_tail(&substream->link_list, &substream->group->substreams);
1651 substream->group->count = 1; 1713 substream->group->count = 1;