author     Lai Jiangshan <laijs@cn.fujitsu.com>            2010-10-20 23:29:05 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2010-11-30 01:01:58 -0500
commit     7b27d5475f86186914e54e4a6bb994e9a985337b
tree       36349c88d75497db89956d6721d82c4f889cbc2b /kernel/rcutree_plugin.h
parent     deb7a41815a8a32d4f9ea2af7a48ed1175222cec
rcu,cleanup: move synchronize_sched_expedited() out of sched.c
The first version of synchronize_sched_expedited() used the migration
code in the scheduler, and was therefore implemented in kernel/sched.c.
However, the more recent version of this code no longer uses the
migration code, so this commit moves it to the main RCU source files.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree_plugin.h')
 kernel/rcutree_plugin.h | 71 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 71 insertions(+), 0 deletions(-)
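For context before the patch itself: synchronize_sched_expedited() provides the same guarantee as synchronize_sched(), namely that a full rcu-sched grace period elapses before it returns, but it forces the grace period to complete quickly at the cost of disturbing every online CPU. A minimal sketch of a typical update-side caller follows; the struct foo and foo_replace() names are hypothetical illustrations, not part of this commit.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int data;
};

static struct foo __rcu *global_foo;

/* Publish a new version, then free the old one once no
 * preempt-disabled (rcu-sched) reader can still reference it. */
static void foo_replace(struct foo *newp)
{
        struct foo *oldp = rcu_dereference_protected(global_foo, 1);

        rcu_assign_pointer(global_foo, newp);
        synchronize_sched_expedited();  /* expedited rcu-sched grace period */
        kfree(oldp);                    /* safe: all prior readers are done */
}

The smp_mb() noted in the patch below ("ensure test happens before caller kfree") is what keeps a kfree() in a pattern like this safe even when the caller returns early because a concurrent expedited grace period covered it.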
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 71a4147473f9..21df7f3e7273 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -25,6 +25,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/stop_machine.h>
 
 /*
  * Check the RCU kernel configuration parameters and print informative
@@ -1014,6 +1015,76 @@ static void __init __rcu_init_preempt(void)
 
 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 
+#ifndef CONFIG_SMP
+
+void synchronize_sched_expedited(void)
+{
+        cond_resched();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#else /* #ifndef CONFIG_SMP */
+
+static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
+
+static int synchronize_sched_expedited_cpu_stop(void *data)
+{
+        /*
+         * There must be a full memory barrier on each affected CPU
+         * between the time that try_stop_cpus() is called and the
+         * time that it returns.
+         *
+         * In the current initial implementation of cpu_stop, the
+         * above condition is already met when the control reaches
+         * this point and the following smp_mb() is not strictly
+         * necessary.  Do smp_mb() anyway for documentation and
+         * robustness against future implementation changes.
+         */
+        smp_mb(); /* See above comment block. */
+        return 0;
+}
+
+/*
+ * Wait for an rcu-sched grace period to elapse, but use "big hammer"
+ * approach to force grace period to end quickly.  This consumes
+ * significant time on all CPUs, and is thus not recommended for
+ * any sort of common-case code.
+ *
+ * Note that it is illegal to call this function while holding any
+ * lock that is acquired by a CPU-hotplug notifier.  Failing to
+ * observe this restriction will result in deadlock.
+ */
+void synchronize_sched_expedited(void)
+{
+        int snap, trycount = 0;
+
+        smp_mb();  /* ensure prior mod happens before capturing snap. */
+        snap = atomic_read(&synchronize_sched_expedited_count) + 1;
+        get_online_cpus();
+        while (try_stop_cpus(cpu_online_mask,
+                             synchronize_sched_expedited_cpu_stop,
+                             NULL) == -EAGAIN) {
+                put_online_cpus();
+                if (trycount++ < 10)
+                        udelay(trycount * num_online_cpus());
+                else {
+                        synchronize_sched();
+                        return;
+                }
+                if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
+                        smp_mb(); /* ensure test happens before caller kfree */
+                        return;
+                }
+                get_online_cpus();
+        }
+        atomic_inc(&synchronize_sched_expedited_count);
+        smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
+        put_online_cpus();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#endif /* #else #ifndef CONFIG_SMP */
+
 #if !defined(CONFIG_RCU_FAST_NO_HZ)
 
 /*
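The retry loop above relies on a counter-snapshot pattern: every successful expedited grace period increments synchronize_sched_expedited_count, and a caller that keeps losing the try_stop_cpus() race may return early once the counter has advanced past a snapshot taken at entry, since that proves a complete grace period both began and ended after this caller started. Below is a stand-alone user-space sketch of the same pattern, for illustration only: the names (done_count, try_once, expedited_op) are hypothetical, contention is faked with rand(), and the synchronize_sched() slow-path fallback is omitted.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_int done_count;   /* counts completed operations */

/* Stand-in for try_stop_cpus(): fails "under contention" 1 time in 4. */
static bool try_once(void)
{
        return rand() % 4 != 0;
}

static void expedited_op(void)
{
        /*
         * Snapshot one past the current count: only a value strictly
         * greater than this proves that a full operation started and
         * finished after this point, so its effect covers ours.
         */
        int snap = atomic_load(&done_count) + 1;

        while (!try_once()) {
                if (atomic_load(&done_count) - snap > 0)
                        return;  /* piggy-back on a concurrent caller */
        }
        atomic_fetch_add(&done_count, 1);
}

int main(void)
{
        for (int i = 0; i < 8; i++)
                expedited_op();
        printf("completed %d operations\n", atomic_load(&done_count));
        return 0;
}

The early-return test uses subtraction rather than a direct comparison, mirroring the kernel code, so the check still behaves sensibly once the counter wraps around.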