author      Takashi Iwai <tiwai@suse.de>    2014-08-29 09:32:29 -0400
committer   Takashi Iwai <tiwai@suse.de>    2014-09-03 08:04:08 -0400
commit      257f8cce5d40b811d229ed71602882baa0012808 (patch)
tree        190a239257c625539a1b157acb3bd0075cdb61dc /include/sound/pcm.h
parent      52addcf9d6669fa439387610bc65c92fa0980cef (diff)
ALSA: pcm: Allow nonatomic trigger operations
Currently, many PCM operations are performed in a critical section protected by a spinlock; in particular, the trigger and pointer callbacks are assumed to be atomic. This is basically because some trigger actions (e.g. PCM stop after a drain or xrun) are done in the interrupt handler. If a driver runs with a threaded irq, however, this doesn't have to be atomic, and many devices want to handle the trigger in a non-atomic context because of lengthy communications.

This patch makes all PCM calls operational in a non-atomic context. What it does is very simple: it replaces the substream spinlock with the corresponding substream mutex when the pcm->nonatomic flag is set. A driver that wants to use the non-atomic PCM ops just needs to set the flag and keep the rest as is. (Of course, it must not handle any PCM ops in irq context.)

Note that the code doesn't check whether the callbacks are actually atomic-safe; it fully trusts that the driver sets pcm->nonatomic correctly.

One possible problem is the case where linked PCM substreams have inconsistent nonatomic states. To avoid this, snd_pcm_link() returns an error if one tries to link an inconsistent PCM substream.

Signed-off-by: Takashi Iwai <tiwai@suse.de>
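[Editor's note, not part of the patch: a minimal driver-side sketch of opting in. snd_pcm_new() and snd_pcm_set_ops() are existing ALSA core APIs; the card, device name, and my_pcm_ops structure are hypothetical placeholders.]

	/* hypothetical probe snippet: opt in to non-atomic PCM ops */
	struct snd_pcm *pcm;
	int err;

	err = snd_pcm_new(card, "My Codec", 0, 1, 1, &pcm);
	if (err < 0)
		return err;
	pcm->nonatomic = true;	/* trigger/pointer callbacks may now sleep */
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &my_pcm_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &my_pcm_ops);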
Diffstat (limited to 'include/sound/pcm.h')
-rw-r--r--   include/sound/pcm.h   58
1 file changed, 46 insertions(+), 12 deletions(-)
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 6f3e10ca0e32..bc79962f4aa6 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -365,6 +365,7 @@ struct snd_pcm_runtime {
 
 struct snd_pcm_group {		/* keep linked substreams */
 	spinlock_t lock;
+	struct mutex mutex;
 	struct list_head substreams;
 	int count;
 };
@@ -460,6 +461,7 @@ struct snd_pcm {
 	void (*private_free) (struct snd_pcm *pcm);
 	struct device *dev;		/* actual hw device this belongs to */
 	bool internal;		/* pcm is for internal use only */
+	bool nonatomic;		/* whole PCM operations are in non-atomic context */
 #if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
 	struct snd_pcm_oss oss;
 #endif
@@ -493,6 +495,7 @@ int snd_pcm_notify(struct snd_pcm_notify *notify, int nfree);
  */
 
 extern rwlock_t snd_pcm_link_rwlock;
+extern struct rw_semaphore snd_pcm_link_rwsem;
 
 int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info);
 int snd_pcm_info_user(struct snd_pcm_substream *substream,
@@ -539,38 +542,69 @@ static inline int snd_pcm_stream_linked(struct snd_pcm_substream *substream)
 
 static inline void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
 {
-	read_lock(&snd_pcm_link_rwlock);
-	spin_lock(&substream->self_group.lock);
+	if (substream->pcm->nonatomic) {
+		down_read(&snd_pcm_link_rwsem);
+		mutex_lock(&substream->self_group.mutex);
+	} else {
+		read_lock(&snd_pcm_link_rwlock);
+		spin_lock(&substream->self_group.lock);
+	}
 }
 
 static inline void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
 {
-	spin_unlock(&substream->self_group.lock);
-	read_unlock(&snd_pcm_link_rwlock);
+	if (substream->pcm->nonatomic) {
+		mutex_unlock(&substream->self_group.mutex);
+		up_read(&snd_pcm_link_rwsem);
+	} else {
+		spin_unlock(&substream->self_group.lock);
+		read_unlock(&snd_pcm_link_rwlock);
+	}
 }
 
 static inline void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
 {
-	read_lock_irq(&snd_pcm_link_rwlock);
-	spin_lock(&substream->self_group.lock);
+	if (substream->pcm->nonatomic) {
+		down_read(&snd_pcm_link_rwsem);
+		mutex_lock(&substream->self_group.mutex);
+	} else {
+		read_lock_irq(&snd_pcm_link_rwlock);
+		spin_lock(&substream->self_group.lock);
+	}
 }
 
 static inline void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
 {
-	spin_unlock(&substream->self_group.lock);
-	read_unlock_irq(&snd_pcm_link_rwlock);
+	if (substream->pcm->nonatomic) {
+		mutex_unlock(&substream->self_group.mutex);
+		up_read(&snd_pcm_link_rwsem);
+	} else {
+		spin_unlock(&substream->self_group.lock);
+		read_unlock_irq(&snd_pcm_link_rwlock);
+	}
 }
 
 #define snd_pcm_stream_lock_irqsave(substream, flags) \
 do { \
-	read_lock_irqsave(&snd_pcm_link_rwlock, (flags)); \
-	spin_lock(&substream->self_group.lock); \
+	if ((substream)->pcm->nonatomic) { \
+		(flags) = 0; /* XXX for avoid warning */ \
+		down_read(&snd_pcm_link_rwsem); \
+		mutex_lock(&(substream)->self_group.mutex); \
+	} else { \
+		read_lock_irqsave(&snd_pcm_link_rwlock, (flags)); \
+		spin_lock(&(substream)->self_group.lock); \
+	} \
 } while (0)
 
 #define snd_pcm_stream_unlock_irqrestore(substream, flags) \
 do { \
-	spin_unlock(&substream->self_group.lock); \
-	read_unlock_irqrestore(&snd_pcm_link_rwlock, (flags)); \
+	if ((substream)->pcm->nonatomic) { \
+		mutex_unlock(&(substream)->self_group.mutex); \
+		up_read(&snd_pcm_link_rwsem); \
+	} else { \
+		spin_unlock(&(substream)->self_group.lock); \
+		read_unlock_irqrestore(&snd_pcm_link_rwlock, (flags)); \
+	} \
 } while (0)
 
 #define snd_pcm_group_for_each_entry(s, substream) \
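[Editor's note, not part of the patch: a sketch of how the helpers above are meant to be used once pcm->nonatomic is set. Because the lock helpers then take the rwsem and group mutex instead of disabling interrupts, a stream can be stopped from a sleepable context such as a threaded irq handler. The handler name and the xrun-detection helper are hypothetical; snd_pcm_stop() and SNDRV_PCM_STATE_XRUN are existing ALSA core symbols.]

	/* hypothetical threaded-irq handler; sleeping is allowed here */
	static irqreturn_t my_pcm_irq_thread(int irq, void *dev_id)
	{
		struct snd_pcm_substream *substream = dev_id;

		/* with pcm->nonatomic set, this takes the group mutex, not a spinlock */
		snd_pcm_stream_lock_irq(substream);
		if (my_device_detected_xrun(substream))	/* hypothetical check */
			snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
		snd_pcm_stream_unlock_irq(substream);

		return IRQ_HANDLED;
	}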