author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2016-04-15 19:44:07 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2016-06-14 19:01:42 -0400
commit		40e0a6cfd53e37d9b8863cdbc0adb1f72e9311e7 (patch)
tree		2cf31648b111d26fbecd8e3f081dc37e41a69b5a
parent		3549c2bc2c4ea8ecfeb9d21cb81cb00c6002b011 (diff)
rcu: Move expedited code from tree_plugin.h to tree_exp.h
People have been having some difficulty finding their way around the
RCU code.  This commit therefore pulls some of the expedited
grace-period code from tree_plugin.h to a new tree_exp.h file.  This
commit is strictly code movement.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
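Editor's note on why this can be "strictly code movement": kernel/rcu builds its RCU flavors as a single translation unit, with tree.c textually #including the specialty files, so static functions such as sync_rcu_exp_handler() can migrate between tree_plugin.h and tree_exp.h without any linkage or behavior change. The sketch below only illustrates that composition pattern; the exact include order and placement inside kernel/rcu/tree.c are assumptions, not taken from this diff.

	/*
	 * Illustrative sketch only -- not part of this commit.  The include
	 * order and surrounding code in kernel/rcu/tree.c are assumptions.
	 */

	/* ... the bulk of kernel/rcu/tree.c ... */

	#include "tree_exp.h"		/* Expedited grace-period code (target of this move). */
	#include "tree_plugin.h"	/* Preemptible-RCU and other specialty code. */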
-rw-r--r--	kernel/rcu/tree_exp.h	94
-rw-r--r--	kernel/rcu/tree_plugin.h	88
2 files changed, 94 insertions(+), 88 deletions(-)
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index db0909cf7fe1..00a02a231ada 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -562,3 +562,97 @@ void synchronize_sched_expedited(void)
 	rcu_exp_wait_wake(rsp, s);
 }
 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#ifdef CONFIG_PREEMPT_RCU
+
+/*
+ * Remote handler for smp_call_function_single().  If there is an
+ * RCU read-side critical section in effect, request that the
+ * next rcu_read_unlock() record the quiescent state up the
+ * ->expmask fields in the rcu_node tree.  Otherwise, immediately
+ * report the quiescent state.
+ */
+static void sync_rcu_exp_handler(void *info)
+{
+	struct rcu_data *rdp;
+	struct rcu_state *rsp = info;
+	struct task_struct *t = current;
+
+	/*
+	 * Within an RCU read-side critical section, request that the next
+	 * rcu_read_unlock() report.  Unless this RCU read-side critical
+	 * section has already blocked, in which case it is already set
+	 * up for the expedited grace period to wait on it.
+	 */
+	if (t->rcu_read_lock_nesting > 0 &&
+	    !t->rcu_read_unlock_special.b.blocked) {
+		t->rcu_read_unlock_special.b.exp_need_qs = true;
+		return;
+	}
+
+	/*
+	 * We are either exiting an RCU read-side critical section (negative
+	 * values of t->rcu_read_lock_nesting) or are not in one at all
+	 * (zero value of t->rcu_read_lock_nesting).  Or we are in an RCU
+	 * read-side critical section that blocked before this expedited
+	 * grace period started.  Either way, we can immediately report
+	 * the quiescent state.
+	 */
+	rdp = this_cpu_ptr(rsp->rda);
+	rcu_report_exp_rdp(rsp, rdp, true);
+}
+
+/**
+ * synchronize_rcu_expedited - Brute-force RCU grace period
+ *
+ * Wait for an RCU-preempt grace period, but expedite it.  The basic
+ * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
+ * checks whether the CPU is in an RCU-preempt critical section, and
+ * if so, it sets a flag that causes the outermost rcu_read_unlock()
+ * to report the quiescent state.  On the other hand, if the CPU is
+ * not in an RCU read-side critical section, the IPI handler reports
+ * the quiescent state immediately.
+ *
+ * Although this is a great improvement over previous expedited
+ * implementations, it is still unfriendly to real-time workloads, and is
+ * thus not recommended for any sort of common-case code.  In fact, if
+ * you are using synchronize_rcu_expedited() in a loop, please restructure
+ * your code to batch your updates, and then use a single synchronize_rcu()
+ * instead.
+ */
+void synchronize_rcu_expedited(void)
+{
+	struct rcu_state *rsp = rcu_state_p;
+	unsigned long s;
+
+	/* If expedited grace periods are prohibited, fall back to normal. */
+	if (rcu_gp_is_normal()) {
+		wait_rcu_gp(call_rcu);
+		return;
+	}
+
+	s = rcu_exp_gp_seq_snap(rsp);
+	if (exp_funnel_lock(rsp, s))
+		return;  /* Someone else did our work for us. */
+
+	/* Initialize the rcu_node tree in preparation for the wait. */
+	sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);
+
+	/* Wait for ->blkd_tasks lists to drain, then wake everyone up. */
+	rcu_exp_wait_wake(rsp, s);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+
+/*
+ * Wait for an rcu-preempt grace period, but make it happen quickly.
+ * But because preemptible RCU does not exist, map to rcu-sched.
+ */
+void synchronize_rcu_expedited(void)
+{
+	synchronize_sched_expedited();
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
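Editor's aside on the kerneldoc advice above (batch your updates and use a single synchronize_rcu() rather than calling synchronize_rcu_expedited() in a loop): the sketch below is a minimal, hypothetical illustration of that batching pattern, not part of this patch. The demo_node structure, the helper names, and the assumption that the caller holds the update-side lock are all invented for the example.

	#include <linux/list.h>
	#include <linux/rculist.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct demo_node {
		struct list_head link;	/* Linkage seen by RCU readers. */
		struct list_head gc;	/* Updater-private linkage for the doomed batch. */
		int key;
	};

	/* Costly pattern: one expedited grace period per element removed. */
	static void demo_remove_each(struct list_head *head)
	{
		struct demo_node *n, *tmp;

		list_for_each_entry_safe(n, tmp, head, link) {
			list_del_rcu(&n->link);
			synchronize_rcu_expedited();	/* IPIs non-idle CPUs every iteration. */
			kfree(n);
		}
	}

	/* Batched pattern recommended above: unlink everything, wait once, then free. */
	static void demo_remove_batched(struct list_head *head)
	{
		struct demo_node *n, *tmp;
		LIST_HEAD(doomed);

		list_for_each_entry_safe(n, tmp, head, link) {
			list_del_rcu(&n->link);
			list_add(&n->gc, &doomed);	/* ->link stays intact for ongoing readers. */
		}
		synchronize_rcu();	/* One normal grace period covers the whole batch. */
		list_for_each_entry_safe(n, tmp, &doomed, gc)
			kfree(n);
	}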
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index ff1cd4e1188d..695071dd1e9c 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -681,84 +681,6 @@ void synchronize_rcu(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
-/*
- * Remote handler for smp_call_function_single().  If there is an
- * RCU read-side critical section in effect, request that the
- * next rcu_read_unlock() record the quiescent state up the
- * ->expmask fields in the rcu_node tree.  Otherwise, immediately
- * report the quiescent state.
- */
-static void sync_rcu_exp_handler(void *info)
-{
-	struct rcu_data *rdp;
-	struct rcu_state *rsp = info;
-	struct task_struct *t = current;
-
-	/*
-	 * Within an RCU read-side critical section, request that the next
-	 * rcu_read_unlock() report.  Unless this RCU read-side critical
-	 * section has already blocked, in which case it is already set
-	 * up for the expedited grace period to wait on it.
-	 */
-	if (t->rcu_read_lock_nesting > 0 &&
-	    !t->rcu_read_unlock_special.b.blocked) {
-		t->rcu_read_unlock_special.b.exp_need_qs = true;
-		return;
-	}
-
-	/*
-	 * We are either exiting an RCU read-side critical section (negative
-	 * values of t->rcu_read_lock_nesting) or are not in one at all
-	 * (zero value of t->rcu_read_lock_nesting).  Or we are in an RCU
-	 * read-side critical section that blocked before this expedited
-	 * grace period started.  Either way, we can immediately report
-	 * the quiescent state.
-	 */
-	rdp = this_cpu_ptr(rsp->rda);
-	rcu_report_exp_rdp(rsp, rdp, true);
-}
-
-/**
- * synchronize_rcu_expedited - Brute-force RCU grace period
- *
- * Wait for an RCU-preempt grace period, but expedite it.  The basic
- * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
- * checks whether the CPU is in an RCU-preempt critical section, and
- * if so, it sets a flag that causes the outermost rcu_read_unlock()
- * to report the quiescent state.  On the other hand, if the CPU is
- * not in an RCU read-side critical section, the IPI handler reports
- * the quiescent state immediately.
- *
- * Although this is a great improvement over previous expedited
- * implementations, it is still unfriendly to real-time workloads, and is
- * thus not recommended for any sort of common-case code.  In fact, if
- * you are using synchronize_rcu_expedited() in a loop, please restructure
- * your code to batch your updates, and then use a single synchronize_rcu()
- * instead.
- */
-void synchronize_rcu_expedited(void)
-{
-	struct rcu_state *rsp = rcu_state_p;
-	unsigned long s;
-
-	/* If expedited grace periods are prohibited, fall back to normal. */
-	if (rcu_gp_is_normal()) {
-		wait_rcu_gp(call_rcu);
-		return;
-	}
-
-	s = rcu_exp_gp_seq_snap(rsp);
-	if (exp_funnel_lock(rsp, s))
-		return;  /* Someone else did our work for us. */
-
-	/* Initialize the rcu_node tree in preparation for the wait. */
-	sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);
-
-	/* Wait for ->blkd_tasks lists to drain, then wake everyone up. */
-	rcu_exp_wait_wake(rsp, s);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
-
 /**
  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
  *
@@ -883,16 +805,6 @@ static void rcu_preempt_check_callbacks(void)
 }
 
 /*
- * Wait for an rcu-preempt grace period, but make it happen quickly.
- * But because preemptible RCU does not exist, map to rcu-sched.
- */
-void synchronize_rcu_expedited(void)
-{
-	synchronize_sched_expedited();
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
-
-/*
  * Because preemptible RCU does not exist, rcu_barrier() is just
  * another name for rcu_barrier_sched().
  */