author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2010-02-26 19:38:56 -0500
committer  Ingo Molnar <mingo@elte.hu>                     2010-02-27 03:53:52 -0500
commit     a47cd880b50e14b0b6f5e9d426ae9a2676c9c474 (patch)
tree       c134dcec52450ec92ea853f4aeeef8e3967a36cb /kernel/rcutree_plugin.h
parent     f5f654096487c6d526c47bb66308f9de81f091cf (diff)
rcu: Fix accelerated grace periods for last non-dynticked CPU
It is invalid to invoke __rcu_process_callbacks() with irqs
disabled, so do it indirectly via raise_softirq(). This
requires a state-machine implementation to cycle through the
grace-period machinery the required number of times.
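To make the state machine concrete, here is a minimal stand-alone C
sketch of the countdown sequencing this commit introduces. The constant
and counter names mirror the patch, but toy_rcu_needs_cpu(),
callbacks_pending, and the single-step flush are invented stand-ins,
not the kernel implementation:

#include <stdio.h>

#define RCU_NEEDS_CPU_FLUSHES 5

static int rcu_dyntick_drain;     /* models the per-CPU drain counter */
static int callbacks_pending = 3; /* pretend three flush steps empty the lists */

/* One call models one trip through rcu_needs_cpu() on the last busy CPU. */
static int toy_rcu_needs_cpu(void)
{
        if (rcu_dyntick_drain <= 0) {
                /* First time through, arm the countdown. */
                rcu_dyntick_drain = RCU_NEEDS_CPU_FLUSHES;
        } else if (--rcu_dyntick_drain <= 0) {
                /* Hit the limit: give up, report whatever is still pending. */
                return callbacks_pending != 0;
        }
        /* One step of pushing callbacks through; stands in for the
         * force_quiescent_state() pass plus the deferred softirq work. */
        if (callbacks_pending)
                callbacks_pending--;
        return callbacks_pending != 0; /* nonzero models raise_softirq() */
}

int main(void)
{
        while (toy_rcu_needs_cpu())
                printf("drain=%d pending=%d\n",
                       rcu_dyntick_drain, callbacks_pending);
        return 0;
}

Each nonzero return tells the caller that RCU still needs the CPU, so
the idle loop comes back and the next call advances the countdown, just
as repeated calls to rcu_needs_cpu() do in the patch below.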
Located-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1267231138-27856-1-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h  73
1 file changed, 53 insertions, 20 deletions
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 3516de7091a1..ed241fc478f0 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -973,9 +973,19 @@ int rcu_needs_cpu(int cpu)
         return rcu_needs_cpu_quick_check(cpu);
 }
 
+/*
+ * Check to see if we need to continue a callback-flush operation to
+ * allow the last CPU to enter dyntick-idle mode.  But fast dyntick-idle
+ * entry is not configured, so we never do need to.
+ */
+static void rcu_needs_cpu_flush(void)
+{
+}
+
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 #define RCU_NEEDS_CPU_FLUSHES 5
+static DEFINE_PER_CPU(int, rcu_dyntick_drain);
 
 /*
  * Check to see if any future RCU-related work will need to be done
@@ -988,39 +998,62 @@ int rcu_needs_cpu(int cpu)
  * only if all other CPUs are already in dynticks-idle mode.  This will
  * allow the CPU cores to be powered down immediately, as opposed to after
  * waiting many milliseconds for grace periods to elapse.
+ *
+ * Because it is not legal to invoke rcu_process_callbacks() with irqs
+ * disabled, we do one pass of force_quiescent_state(), then do a
+ * raise_softirq() to cause rcu_process_callbacks() to be invoked later.
+ * The per-cpu rcu_dyntick_drain variable controls the sequencing.
  */
 int rcu_needs_cpu(int cpu)
 {
-        int c = 1;
-        int i;
+        int c = 0;
         int thatcpu;
 
         /* Don't bother unless we are the last non-dyntick-idle CPU. */
         for_each_cpu_not(thatcpu, nohz_cpu_mask)
-                if (thatcpu != cpu)
+                if (thatcpu != cpu) {
+                        per_cpu(rcu_dyntick_drain, cpu) = 0;
                         return rcu_needs_cpu_quick_check(cpu);
-
-        /* Try to push remaining RCU-sched and RCU-bh callbacks through. */
-        for (i = 0; i < RCU_NEEDS_CPU_FLUSHES && c; i++) {
-                c = 0;
-                if (per_cpu(rcu_sched_data, cpu).nxtlist) {
-                        rcu_sched_qs(cpu);
-                        force_quiescent_state(&rcu_sched_state, 0);
-                        __rcu_process_callbacks(&rcu_sched_state,
-                                                &per_cpu(rcu_sched_data, cpu));
-                        c = !!per_cpu(rcu_sched_data, cpu).nxtlist;
-                }
-                if (per_cpu(rcu_bh_data, cpu).nxtlist) {
-                        rcu_bh_qs(cpu);
-                        force_quiescent_state(&rcu_bh_state, 0);
-                        __rcu_process_callbacks(&rcu_bh_state,
-                                                &per_cpu(rcu_bh_data, cpu));
-                        c = !!per_cpu(rcu_bh_data, cpu).nxtlist;
                 }
+
+        /* Check and update the rcu_dyntick_drain sequencing. */
+        if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+                /* First time through, initialize the counter. */
+                per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
+        } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+                /* We have hit the limit, so time to give up. */
+                return rcu_needs_cpu_quick_check(cpu);
+        }
+
+        /* Do one step pushing remaining RCU callbacks through. */
+        if (per_cpu(rcu_sched_data, cpu).nxtlist) {
+                rcu_sched_qs(cpu);
+                force_quiescent_state(&rcu_sched_state, 0);
+                c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
+        }
+        if (per_cpu(rcu_bh_data, cpu).nxtlist) {
+                rcu_bh_qs(cpu);
+                force_quiescent_state(&rcu_bh_state, 0);
+                c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
         }
 
         /* If RCU callbacks are still pending, RCU still needs this CPU. */
+        if (c)
+                raise_softirq(RCU_SOFTIRQ);
         return c;
 }
 
+/*
+ * Check to see if we need to continue a callback-flush operation to
+ * allow the last CPU to enter dyntick-idle mode.
+ */
+static void rcu_needs_cpu_flush(void)
+{
+        int cpu = smp_processor_id();
+
+        if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
+                return;
+        (void)rcu_needs_cpu(cpu);
+}
+
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
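The shape of the fix, reduced to its essence: work that is illegal in
the current context (here, callback processing with irqs disabled) is
flagged and completed later from a context where it is legal. A rough
user-space analogy follows; quick_pass(), heavy_pass(), idle_path(),
and softirq_tail() are invented names, and the kernel symbols appear
only in comments as points of comparison:

#include <stdbool.h>
#include <stdio.h>

static bool softirq_raised; /* models the pending RCU_SOFTIRQ bit */

/* Cheap work that is safe even with interrupts disabled. */
static void quick_pass(void)
{
        printf("quick pass (force_quiescent_state() analogue)\n");
}

/* Heavy work that must wait for a context with interrupts enabled. */
static void heavy_pass(void)
{
        printf("heavy pass (rcu_process_callbacks() analogue)\n");
}

/* Analogue of rcu_needs_cpu() on the idle path, irqs off: do only the
 * cheap part now, and flag the rest. */
static void idle_path(void)
{
        quick_pass();
        softirq_raised = true; /* analogue of raise_softirq(RCU_SOFTIRQ) */
}

/* Analogue of softirq processing, which runs later with irqs on. */
static void softirq_tail(void)
{
        if (softirq_raised) {
                softirq_raised = false;
                heavy_pass();
        }
}

int main(void)
{
        idle_path();    /* the irq-disabled half */
        softirq_tail(); /* the deferred half completes here */
        return 0;
}

This is why the patch can drop the direct __rcu_process_callbacks()
calls: each pass does only the irq-safe force_quiescent_state() step,
and raise_softirq(RCU_SOFTIRQ) schedules the rest for softirq context.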