Diffstat (limited to 'kernel/rcutiny_plugin.h')
-rw-r--r--  kernel/rcutiny_plugin.h  56
1 file changed, 5 insertions, 51 deletions
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index fc31a2d65100..918fd1e8509c 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -132,7 +132,6 @@ static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
 	RCU_TRACE(.rcb.name = "rcu_preempt")
 };
 
-static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(void);
 static void rcu_report_exp_done(void);
 
@@ -351,8 +350,9 @@ static int rcu_initiate_boost(void)
 			rcu_preempt_ctrlblk.boost_tasks =
 				rcu_preempt_ctrlblk.gp_tasks;
 		invoke_rcu_callbacks();
-	} else
+	} else {
 		RCU_TRACE(rcu_initiate_boost_trace());
+	}
 	return 1;
 }
 
@@ -527,23 +527,11 @@ void rcu_preempt_note_context_switch(void)
 }
 
 /*
- * Tiny-preemptible RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
-void __rcu_read_lock(void)
-{
-	current->rcu_read_lock_nesting++;
-	barrier();  /* needed if we ever invoke rcu_read_lock in rcutiny.c */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
-/*
  * Handle special cases during rcu_read_unlock(), such as needing to
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static noinline void rcu_read_unlock_special(struct task_struct *t)
+void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
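The hunk above deletes Tiny RCU's private copy of __rcu_read_lock() and makes rcu_read_unlock_special() extern (matching the forward-declaration removal in the first hunk), so the read-lock entry path can live in code shared with Tree RCU. As the deleted body shows, entering a preemptible-RCU read-side critical section is nothing more than a task-local counter increment plus a compiler barrier. A minimal sketch of the consolidated version, assuming it keeps the body deleted here:

	/*
	 * Entry path after consolidation (a sketch, not the verbatim common
	 * code): ->rcu_read_lock_nesting is task-local, so no atomics or
	 * memory barriers are needed; barrier() only stops the compiler
	 * from hoisting critical-section accesses above the increment.
	 */
	void __rcu_read_lock(void)
	{
		current->rcu_read_lock_nesting++;
		barrier();  /* critical section begins after the increment */
	}
	EXPORT_SYMBOL_GPL(__rcu_read_lock);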
@@ -627,38 +615,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 }
 
 /*
- * Tiny-preemptible RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
-void __rcu_read_unlock(void)
-{
-	struct task_struct *t = current;
-
-	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
-	if (t->rcu_read_lock_nesting != 1)
-		--t->rcu_read_lock_nesting;
-	else {
-		t->rcu_read_lock_nesting = INT_MIN;
-		barrier();  /* assign before ->rcu_read_unlock_special load */
-		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-			rcu_read_unlock_special(t);
-		barrier();  /* ->rcu_read_unlock_special load before assign */
-		t->rcu_read_lock_nesting = 0;
-	}
-#ifdef CONFIG_PROVE_LOCKING
-	{
-		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
-		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
-	}
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
-/*
  * Check for a quiescent state from the current CPU. When a task blocks,
  * the task is recorded in the rcu_preempt_ctrlblk structure, which is
  * checked elsewhere. This is called from the scheduling-clock interrupt.
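The deleted __rcu_read_unlock() above relies on a negative-nesting trick: the outermost unlock parks ->rcu_read_lock_nesting at INT_MIN while rcu_read_unlock_special() runs, so a nested unlock arriving from an interrupt handler sees a value other than 1 and takes only the plain decrement path instead of re-entering the special-case processing. The CONFIG_PROVE_LOCKING check then warns if the counter is negative yet far from INT_MIN, which would indicate corruption. A toy userspace model of just that counter discipline (hypothetical names, single task, no real interrupts):

	#include <limits.h>
	#include <stdio.h>

	static int nesting;	/* models ->rcu_read_lock_nesting */

	static void toy_read_lock(void)
	{
		nesting++;
	}

	static void toy_read_unlock(void)
	{
		if (nesting != 1) {
			--nesting;		/* nested unlock: plain decrement */
		} else {
			nesting = INT_MIN;	/* outermost: unlock in progress */
			/*
			 * Special-case processing would run here; a nested
			 * unlock arriving now sees nesting != 1 and cannot
			 * re-enter this branch.
			 */
			nesting = 0;		/* unlock complete */
		}
	}

	int main(void)
	{
		toy_read_lock();	/* nesting: 0 -> 1 */
		toy_read_lock();	/* nesting: 1 -> 2 */
		toy_read_unlock();	/* nesting: 2 -> 1 */
		toy_read_unlock();	/* nesting: 1 -> INT_MIN -> 0 */
		printf("final nesting = %d\n", nesting);	/* prints 0 */
		return 0;
	}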
@@ -823,9 +779,9 @@ void synchronize_rcu_expedited(void)
 	rpcp->exp_tasks = NULL;
 
 	/* Wait for tail of ->blkd_tasks list to drain. */
-	if (!rcu_preempted_readers_exp())
+	if (!rcu_preempted_readers_exp()) {
 		local_irq_restore(flags);
-	else {
+	} else {
 		rcu_initiate_boost();
 		local_irq_restore(flags);
 		wait_event(sync_rcu_preempt_exp_wq,
@@ -846,8 +802,6 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
  */
 int rcu_preempt_needs_cpu(void)
 {
-	if (!rcu_preempt_running_reader())
-		rcu_preempt_cpu_qs();
 	return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
 }
 
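For context, the nesting counter managed throughout this file is driven by ordinary RCU readers. A kernel-style sketch (struct foo, gp, and read_foo() are hypothetical; the rcu_* calls are the real API) showing how rcu_read_lock()/rcu_read_unlock() bracket a read-side critical section and drive ->rcu_read_lock_nesting:

	#include <linux/rcupdate.h>

	struct foo {
		int a;
	};

	static struct foo __rcu *gp;	/* hypothetical RCU-protected pointer */

	static int read_foo(void)
	{
		struct foo *p;
		int a = -1;

		rcu_read_lock();		/* ->rcu_read_lock_nesting: 0 -> 1 */
		p = rcu_dereference(gp);	/* safe only inside the critical section */
		if (p)
			a = p->a;		/* under PREEMPT_RCU the reader may be
						 * preempted here and stay protected */
		rcu_read_unlock();		/* 1 -> 0; the outermost unlock is where
						 * rcu_read_unlock_special() may run */
		return a;
	}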