-rw-r--r--	kernel/rcutree_plugin.h	18
1 file changed, 16 insertions, 2 deletions
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 643c8f650dd0..c22c4ef2a0d0 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1041,6 +1041,8 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
 	 * robustness against future implementation changes.
 	 */
 	smp_mb(); /* See above comment block. */
+	if (cpumask_first(cpu_online_mask) == smp_processor_id())
+		atomic_inc(&synchronize_sched_expedited_count);
 	return 0;
 }
 
@@ -1053,13 +1055,26 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
  * Note that it is illegal to call this function while holding any
  * lock that is acquired by a CPU-hotplug notifier.  Failing to
  * observe this restriction will result in deadlock.
+ *
+ * The synchronize_sched_expedited_cpu_stop() function is called
+ * in stop-CPU context, but in order to keep overhead down to a dull
+ * roar, we don't force this function to wait for its counterparts
+ * on other CPUs.  One instance of this function will increment the
+ * synchronize_sched_expedited_count variable per call to
+ * try_stop_cpus(), but there is no guarantee what order this instance
+ * will occur in.  The worst case is that it is last on one call
+ * to try_stop_cpus(), and the first on the next call.  This means
+ * that piggybacking requires that synchronize_sched_expedited_count
+ * be incremented by 3: this guarantees that the piggybacking
+ * task has waited through an entire cycle of context switches,
+ * even in the worst case.
  */
 void synchronize_sched_expedited(void)
 {
 	int snap, trycount = 0;
 
 	smp_mb();  /* ensure prior mod happens before capturing snap. */
-	snap = atomic_read(&synchronize_sched_expedited_count) + 1;
+	snap = atomic_read(&synchronize_sched_expedited_count) + 2;
 	get_online_cpus();
 	while (try_stop_cpus(cpu_online_mask,
 			     synchronize_sched_expedited_cpu_stop,
@@ -1077,7 +1092,6 @@ void synchronize_sched_expedited(void)
 		}
 		get_online_cpus();
 	}
-	atomic_inc(&synchronize_sched_expedited_count);
 	smp_mb__after_atomic_inc();  /* ensure post-GP actions seen after GP. */
 	put_online_cpus();
 }
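
For readers tracing the counter arithmetic in the new comment block, here is a minimal
user-space sketch of the piggybacking test. It uses C11 <stdatomic.h> rather than the
kernel's atomic_t API, and the helper names snapshot_expedited() and gp_passed() are
invented for illustration; only the "+ 2" snapshot and the "count - snap > 0"
comparison mirror the patched code.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int expedited_count;	/* plays the role of synchronize_sched_expedited_count */

/* Snapshot two beyond the current count, as the patched code does. */
static int snapshot_expedited(void)
{
	return atomic_load(&expedited_count) + 2;
}

/*
 * A grace period has certainly elapsed once the count moves strictly
 * past the snapshot, i.e. has advanced by 3 from the value read above.
 * The signed difference keeps the test meaningful across counter
 * wraparound; the kernel builds with -fno-strict-overflow, which makes
 * such signed wraparound well defined there.
 */
static int gp_passed(int snap)
{
	return atomic_load(&expedited_count) - snap > 0;
}

int main(void)
{
	int snap = snapshot_expedited();	/* count is 0, so snap == 2 */

	atomic_fetch_add(&expedited_count, 1);
	atomic_fetch_add(&expedited_count, 1);
	printf("after 2 increments: %s\n", gp_passed(snap) ? "done" : "wait");

	atomic_fetch_add(&expedited_count, 1);	/* worst-case third increment */
	printf("after 3 increments: %s\n", gp_passed(snap) ? "done" : "wait");
	return 0;
}

Built with, say, gcc -std=c11, this prints "wait" and then "done": in the worst case
described above, two increments can straddle a single try_stop_cpus() cycle, so only
the third increment proves the piggybacking task has waited through a full cycle of
context switches.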