 include/linux/rcupdate.h |  1 -
 include/linux/rcutiny.h  |  5 +++++
 include/linux/rcutree.h  |  1 +
 kernel/rcutree_plugin.h  | 71 +++++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched.c           | 69 ------------------------------------------------
 5 files changed, 77 insertions(+), 70 deletions(-)
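This patch moves synchronize_sched_expedited() out of kernel/sched.c and into the RCU implementation: rcutree gains the try_stop_cpus()-based version, while rcutiny maps it onto synchronize_sched(). For context, here is a minimal caller-side sketch of the RCU-sched update pattern this primitive accelerates; gp, gp_lock, struct foo, and update_foo() are hypothetical names for illustration, not part of this patch:

	struct foo { int a; };
	static struct foo __rcu *gp;
	static DEFINE_SPINLOCK(gp_lock);

	static void update_foo(struct foo *new)
	{
		struct foo *old;

		spin_lock(&gp_lock);
		old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
		rcu_assign_pointer(gp, new);	/* publish the replacement */
		spin_unlock(&gp_lock);
		synchronize_sched_expedited();	/* wait out preempt-disabled readers */
		kfree(old);			/* safe: no reader can still hold old */
	}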
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 7142ee3304ab..49e8e16308e1 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -66,7 +66,6 @@ extern void call_rcu_sched(struct rcu_head *head,
 extern void synchronize_sched(void);
 extern void rcu_barrier_bh(void);
 extern void rcu_barrier_sched(void);
-extern void synchronize_sched_expedited(void);
 extern int sched_expedited_torture_stats(char *page);
 
 static inline void __rcu_read_lock_bh(void)
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index ea025a611fcc..30ebd7c8d874 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -60,6 +60,11 @@ static inline void synchronize_rcu_bh_expedited(void)
 	synchronize_sched();
 }
 
+static inline void synchronize_sched_expedited(void)
+{
+	synchronize_sched();
+}
+
 #ifdef CONFIG_TINY_RCU
 
 static inline void rcu_preempt_note_context_switch(void)
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index c0e96833aa73..3a933482734a 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -48,6 +48,7 @@ static inline void exit_rcu(void)
 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 extern void synchronize_rcu_bh(void);
+extern void synchronize_sched_expedited(void);
 extern void synchronize_rcu_expedited(void);
 
 static inline void synchronize_rcu_bh_expedited(void)
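With the declaration dropped from rcupdate.h, each RCU flavor's header now supplies its own synchronize_sched_expedited(): a static inline wrapper in rcutiny.h and an extern declaration in rcutree.h. Callers keep including <linux/rcupdate.h>, which (assuming the usual configuration-based dispatch in that header) pulls in whichever flavor header matches the build, so no caller changes are needed. A sketch of what a caller sees; flush_foo() is a hypothetical example function:

	#include <linux/rcupdate.h>	/* selects rcutiny.h or rcutree.h */

	static void flush_foo(void)
	{
		/*
		 * TINY_RCU: inline wrapper around synchronize_sched().
		 * TREE_RCU: the try_stop_cpus()-based version added below.
		 */
		synchronize_sched_expedited();
	}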
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 71a4147473f9..21df7f3e7273 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -25,6 +25,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/stop_machine.h>
 
 /*
  * Check the RCU kernel configuration parameters and print informative
@@ -1014,6 +1015,76 @@ static void __init __rcu_init_preempt(void)
 
 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 
+#ifndef CONFIG_SMP
+
+void synchronize_sched_expedited(void)
+{
+	cond_resched();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#else /* #ifndef CONFIG_SMP */
+
+static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
+
+static int synchronize_sched_expedited_cpu_stop(void *data)
+{
+	/*
+	 * There must be a full memory barrier on each affected CPU
+	 * between the time that try_stop_cpus() is called and the
+	 * time that it returns.
+	 *
+	 * In the current initial implementation of cpu_stop, the
+	 * above condition is already met when the control reaches
+	 * this point and the following smp_mb() is not strictly
+	 * necessary. Do smp_mb() anyway for documentation and
+	 * robustness against future implementation changes.
+	 */
+	smp_mb(); /* See above comment block. */
+	return 0;
+}
+
+/*
+ * Wait for an rcu-sched grace period to elapse, but use "big hammer"
+ * approach to force grace period to end quickly. This consumes
+ * significant time on all CPUs, and is thus not recommended for
+ * any sort of common-case code.
+ *
+ * Note that it is illegal to call this function while holding any
+ * lock that is acquired by a CPU-hotplug notifier. Failing to
+ * observe this restriction will result in deadlock.
+ */
+void synchronize_sched_expedited(void)
+{
+	int snap, trycount = 0;
+
+	smp_mb();  /* ensure prior mod happens before capturing snap. */
+	snap = atomic_read(&synchronize_sched_expedited_count) + 1;
+	get_online_cpus();
+	while (try_stop_cpus(cpu_online_mask,
+			     synchronize_sched_expedited_cpu_stop,
+			     NULL) == -EAGAIN) {
+		put_online_cpus();
+		if (trycount++ < 10)
+			udelay(trycount * num_online_cpus());
+		else {
+			synchronize_sched();
+			return;
+		}
+		if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
+			smp_mb(); /* ensure test happens before caller kfree */
+			return;
+		}
+		get_online_cpus();
+	}
+	atomic_inc(&synchronize_sched_expedited_count);
+	smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
+	put_online_cpus();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#endif /* #else #ifndef CONFIG_SMP */
+
 #if !defined(CONFIG_RCU_FAST_NO_HZ)
 
 /*
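The retry loop above relies on a counter-snapshot idiom: snap is taken as the count plus one before the first attempt, and the count is incremented only after an expedited grace period completes. If the count moves past snap while we are retrying, enough expedited grace periods have completed since our snapshot that at least one of them fully postdates our caller's updates, so we can return without stopping the CPUs ourselves. A standalone sketch of the test, under the same setup; gp_count and gp_covered_since() are hypothetical names, not from this patch:

	static atomic_t gp_count = ATOMIC_INIT(0);

	static bool gp_covered_since(int snap)
	{
		/*
		 * snap was read as atomic_read(&gp_count) + 1 before the
		 * caller's first attempt. Subtraction (rather than a
		 * direct comparison) keeps the test correct even if the
		 * counter wraps around.
		 */
		return atomic_read(&gp_count) - snap > 0;
	}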
diff --git a/kernel/sched.c b/kernel/sched.c
index ae8f75a5ceb4..d1e8889872a1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -9131,72 +9131,3 @@ struct cgroup_subsys cpuacct_subsys = {
 };
 #endif /* CONFIG_CGROUP_CPUACCT */
 
-#ifndef CONFIG_SMP
-
-void synchronize_sched_expedited(void)
-{
-	barrier();
-}
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-
-#else /* #ifndef CONFIG_SMP */
-
-static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
-
-static int synchronize_sched_expedited_cpu_stop(void *data)
-{
-	/*
-	 * There must be a full memory barrier on each affected CPU
-	 * between the time that try_stop_cpus() is called and the
-	 * time that it returns.
-	 *
-	 * In the current initial implementation of cpu_stop, the
-	 * above condition is already met when the control reaches
-	 * this point and the following smp_mb() is not strictly
-	 * necessary. Do smp_mb() anyway for documentation and
-	 * robustness against future implementation changes.
-	 */
-	smp_mb(); /* See above comment block. */
-	return 0;
-}
-
-/*
- * Wait for an rcu-sched grace period to elapse, but use "big hammer"
- * approach to force grace period to end quickly. This consumes
- * significant time on all CPUs, and is thus not recommended for
- * any sort of common-case code.
- *
- * Note that it is illegal to call this function while holding any
- * lock that is acquired by a CPU-hotplug notifier. Failing to
- * observe this restriction will result in deadlock.
- */
-void synchronize_sched_expedited(void)
-{
-	int snap, trycount = 0;
-
-	smp_mb();  /* ensure prior mod happens before capturing snap. */
-	snap = atomic_read(&synchronize_sched_expedited_count) + 1;
-	get_online_cpus();
-	while (try_stop_cpus(cpu_online_mask,
-			     synchronize_sched_expedited_cpu_stop,
-			     NULL) == -EAGAIN) {
-		put_online_cpus();
-		if (trycount++ < 10)
-			udelay(trycount * num_online_cpus());
-		else {
-			synchronize_sched();
-			return;
-		}
-		if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
-			smp_mb(); /* ensure test happens before caller kfree */
-			return;
-		}
-		get_online_cpus();
-	}
-	atomic_inc(&synchronize_sched_expedited_count);
-	smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
-	put_online_cpus();
-}
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-
-#endif /* #else #ifndef CONFIG_SMP */