-rw-r--r-- kernel/rcutree.c        |  3
-rw-r--r-- kernel/rcutree.h        |  1
-rw-r--r-- kernel/rcutree_plugin.h | 73
3 files changed, 57 insertions(+), 20 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 335bfe4f0076..3ec8160fc75f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1341,6 +1341,9 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 	 * grace-period manipulations above.
 	 */
 	smp_mb(); /* See above block comment. */
+
+	/* If we are last CPU on way to dyntick-idle mode, accelerate it. */
+	rcu_needs_cpu_flush();
 }
 
 static void
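A note on placement: rcu_needs_cpu_flush() is invoked at the tail of rcu_process_callbacks(), i.e. from the RCU_SOFTIRQ handler where irqs are enabled; the second rcutree_plugin.h hunk below explains that the flush step may not run with irqs disabled, so each step re-arms the softirq rather than looping. The toy userspace program below models that re-driving pattern only; every name in it is a hypothetical stand-in, not a kernel API.

#include <stdbool.h>
#include <stdio.h>

static bool softirq_pending;		/* models the pending RCU_SOFTIRQ bit */
static int flush_steps_left = 3;	/* models the per-CPU flush budget */

static void toy_needs_cpu(void)		/* models rcu_needs_cpu() */
{
	if (flush_steps_left > 0) {
		flush_steps_left--;
		softirq_pending = true;	/* models raise_softirq(RCU_SOFTIRQ) */
	}
}

static void toy_process_callbacks(void)	/* models rcu_process_callbacks() */
{
	/* ...callback processing elided... */
	toy_needs_cpu();			/* models rcu_needs_cpu_flush() */
}

int main(void)
{
	toy_needs_cpu();			/* models the idle-entry check */
	while (softirq_pending) {		/* models softirq dispatch */
		softirq_pending = false;
		toy_process_callbacks();
		printf("flush step taken, %d left\n", flush_steps_left);
	}
	return 0;
}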
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 2ceb08388582..1439eb504c22 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -373,5 +373,6 @@ static int rcu_preempt_needs_cpu(int cpu);
 static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
 static void rcu_preempt_send_cbs_to_orphanage(void);
 static void __init __rcu_init_preempt(void);
+static void rcu_needs_cpu_flush(void);
 
 #endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 3516de7091a1..ed241fc478f0 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -973,9 +973,19 @@ int rcu_needs_cpu(int cpu)
 	return rcu_needs_cpu_quick_check(cpu);
 }
 
+/*
+ * Check to see if we need to continue a callback-flush operation to
+ * allow the last CPU to enter dyntick-idle mode.  But fast dyntick-idle
+ * entry is not configured, so we never need to.
+ */
+static void rcu_needs_cpu_flush(void)
+{
+}
+
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 #define RCU_NEEDS_CPU_FLUSHES 5
+static DEFINE_PER_CPU(int, rcu_dyntick_drain);
 
 /*
  * Check to see if any future RCU-related work will need to be done
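Reviewer aside: the empty rcu_needs_cpu_flush() stub above follows the usual pattern for keeping #ifdefs out of callers, so rcu_process_callbacks() can invoke the hook unconditionally in both configurations. A minimal sketch of the pattern, with hypothetical names (CONFIG_FEATURE_X and friends are not real options):

#ifdef CONFIG_FEATURE_X			/* hypothetical config option */
static void feature_x_hook(void)
{
	/* real work when the feature is configured */
}
#else
static void feature_x_hook(void)
{
	/* no-op stub: compiles away, callers need no #ifdef */
}
#endif

void common_path(void)
{
	feature_x_hook();		/* always safe to call */
}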
@@ -988,39 +998,62 @@ int rcu_needs_cpu(int cpu)
  * only if all other CPUs are already in dynticks-idle mode.  This will
  * allow the CPU cores to be powered down immediately, as opposed to after
  * waiting many milliseconds for grace periods to elapse.
+ *
+ * Because it is not legal to invoke rcu_process_callbacks() with irqs
+ * disabled, we do one pass of force_quiescent_state(), then do a
+ * raise_softirq() to cause rcu_process_callbacks() to be invoked later.
+ * The per-cpu rcu_dyntick_drain variable controls the sequencing.
  */
 int rcu_needs_cpu(int cpu)
 {
-	int c = 1;
-	int i;
+	int c = 0;
 	int thatcpu;
 
 	/* Don't bother unless we are the last non-dyntick-idle CPU. */
 	for_each_cpu_not(thatcpu, nohz_cpu_mask)
-		if (thatcpu != cpu)
+		if (thatcpu != cpu) {
+			per_cpu(rcu_dyntick_drain, cpu) = 0;
 			return rcu_needs_cpu_quick_check(cpu);
-
-	/* Try to push remaining RCU-sched and RCU-bh callbacks through. */
-	for (i = 0; i < RCU_NEEDS_CPU_FLUSHES && c; i++) {
-		c = 0;
-		if (per_cpu(rcu_sched_data, cpu).nxtlist) {
-			rcu_sched_qs(cpu);
-			force_quiescent_state(&rcu_sched_state, 0);
-			__rcu_process_callbacks(&rcu_sched_state,
-						&per_cpu(rcu_sched_data, cpu));
-			c = !!per_cpu(rcu_sched_data, cpu).nxtlist;
-		}
-		if (per_cpu(rcu_bh_data, cpu).nxtlist) {
-			rcu_bh_qs(cpu);
-			force_quiescent_state(&rcu_bh_state, 0);
-			__rcu_process_callbacks(&rcu_bh_state,
-						&per_cpu(rcu_bh_data, cpu));
-			c = !!per_cpu(rcu_bh_data, cpu).nxtlist;
 		}
+
+	/* Check and update the rcu_dyntick_drain sequencing. */
+	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+		/* First time through, initialize the counter. */
+		per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
+	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+		/* We have hit the limit, so time to give up. */
+		return rcu_needs_cpu_quick_check(cpu);
+	}
+
+	/* Do one step pushing remaining RCU callbacks through. */
+	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
+		rcu_sched_qs(cpu);
+		force_quiescent_state(&rcu_sched_state, 0);
+		c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
+	}
+	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
+		rcu_bh_qs(cpu);
+		force_quiescent_state(&rcu_bh_state, 0);
+		c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
 	}
 
 	/* If RCU callbacks are still pending, RCU still needs this CPU. */
+	if (c)
+		raise_softirq(RCU_SOFTIRQ);
 	return c;
 }
 
+/*
+ * Check to see if we need to continue a callback-flush operation to
+ * allow the last CPU to enter dyntick-idle mode.
+ */
+static void rcu_needs_cpu_flush(void)
+{
+	int cpu = smp_processor_id();
+
+	if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
+		return;
+	(void)rcu_needs_cpu(cpu);
+}
+
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
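To make the sequencing concrete, here is a standalone userspace model of the rcu_dyntick_drain holdoff logic above: each invocation takes one flush step, and after RCU_NEEDS_CPU_FLUSHES consecutive attempts the CPU gives up and falls back to the quick check. This is an illustration only, not kernel code; "batches" stand in for the ->nxtlist callbacks, and the force_quiescent_state()/softirq step is collapsed into a decrement.

#include <stdio.h>

#define RCU_NEEDS_CPU_FLUSHES 5

static int drain;		/* models the per-CPU rcu_dyntick_drain */
static int batches = 3;		/* models callbacks still queued */

/* Models rcu_needs_cpu(): nonzero means RCU still needs this CPU. */
static int model_needs_cpu(void)
{
	if (drain <= 0) {
		/* First time through, arm the holdoff counter. */
		drain = RCU_NEEDS_CPU_FLUSHES;
	} else if (--drain <= 0) {
		/* Budget exhausted: give up and just report pending work. */
		return batches != 0;
	}

	/* One step pushing callbacks through (fqs + softirq in the patch). */
	if (batches > 0)
		batches--;
	return batches != 0;	/* nonzero would trigger raise_softirq() */
}

int main(void)
{
	int pass = 0;

	while (model_needs_cpu())
		printf("pass %d: %d batch(es) still pending\n", ++pass, batches);
	printf("all callbacks drained; CPU may enter dyntick-idle\n");
	return 0;
}

Run as written, this drains three batches in three passes, well inside the five-pass budget; raising "batches" above RCU_NEEDS_CPU_FLUSHES exercises the give-up path instead.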