author     Paul E. McKenney <paul.mckenney@linaro.org>    2012-01-06 17:11:30 -0500
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-02-21 12:03:25 -0500
commit     486e259340fc4c60474f2c14703e3b3634bb58ca (patch)
tree       70a58702194588fa0773463523f72c682785d040 /kernel/rcutree_plugin.h
parent     0bb7b59d6e2b8440cd7097097dd4bbfc4d76ed07 (diff)
rcu: Avoid waking up CPUs having only kfree_rcu() callbacks
When CONFIG_RCU_FAST_NO_HZ is enabled, RCU will allow a given CPU to
enter dyntick-idle mode even if it still has RCU callbacks queued.
RCU avoids system hangs in this case by scheduling a timer for several
jiffies in the future. However, if all of the callbacks on that CPU
are from kfree_rcu(), there is no reason to wake the CPU up, as it is
not a problem to defer freeing of memory.
This commit therefore tracks the number of callbacks on a given CPU
that are from kfree_rcu(), and avoids scheduling the timer if all of
a given CPU's callbacks are from kfree_rcu().
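At its core, the change amounts to comparing a per-CPU count of lazy
(kfree_rcu()) callbacks against the total callback count and arming the
wakeup timer only when the two differ. The stand-alone C model below
illustrates that bookkeeping; the names (cpu_data, queue_cb(),
needs_wakeup_timer()) are illustrative stand-ins, not kernel APIs, while
the real patch threads a lazy flag through __call_rcu() and compares
rdp->qlen against rdp->qlen_lazy:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the per-CPU rcu_data callback counters. */
struct cpu_data {
        long qlen;      /* all callbacks queued on this CPU */
        long qlen_lazy; /* of those, how many came from kfree_rcu() */
};

/* Queue one callback; lazy == true marks a kfree_rcu() callback. */
static void queue_cb(struct cpu_data *cd, bool lazy)
{
        cd->qlen++;
        if (lazy)
                cd->qlen_lazy++;
}

/* Arm the dyntick-idle wakeup timer only for non-lazy callbacks. */
static bool needs_wakeup_timer(const struct cpu_data *cd)
{
        return cd->qlen != cd->qlen_lazy;
}

int main(void)
{
        struct cpu_data cd = { 0, 0 };

        queue_cb(&cd, true);  /* kfree_rcu(): freeing memory can wait */
        printf("timer needed: %d\n", needs_wakeup_timer(&cd)); /* 0 */

        queue_cb(&cd, false); /* call_rcu(): callback may matter soon */
        printf("timer needed: %d\n", needs_wakeup_timer(&cd)); /* 1 */
        return 0;
}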
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h | 79
1 file changed, 76 insertions(+), 3 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 3680b6b35bf3..7adf232bb66b 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -671,10 +671,24 @@ static void rcu_preempt_do_callbacks(void)
  */
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 {
-        __call_rcu(head, func, &rcu_preempt_state);
+        __call_rcu(head, func, &rcu_preempt_state, 0);
 }
 EXPORT_SYMBOL_GPL(call_rcu);
 
+/*
+ * Queue an RCU callback for lazy invocation after a grace period.
+ * This will likely be later named something like "call_rcu_lazy()",
+ * but this change will require some way of tagging the lazy RCU
+ * callbacks in the list of pending callbacks.  Until then, this
+ * function may only be called from __kfree_rcu().
+ */
+void kfree_call_rcu(struct rcu_head *head,
+                    void (*func)(struct rcu_head *rcu))
+{
+        __call_rcu(head, func, &rcu_preempt_state, 1);
+}
+EXPORT_SYMBOL_GPL(kfree_call_rcu);
+
 /**
  * synchronize_rcu - wait until a grace period has elapsed.
  *
@@ -1065,6 +1079,22 @@ static void rcu_preempt_process_callbacks(void)
 }
 
 /*
+ * Queue an RCU callback for lazy invocation after a grace period.
+ * This will likely be later named something like "call_rcu_lazy()",
+ * but this change will require some way of tagging the lazy RCU
+ * callbacks in the list of pending callbacks.  Until then, this
+ * function may only be called from __kfree_rcu().
+ *
+ * Because there is no preemptible RCU, we use RCU-sched instead.
+ */
+void kfree_call_rcu(struct rcu_head *head,
+                    void (*func)(struct rcu_head *rcu))
+{
+        __call_rcu(head, func, &rcu_sched_state, 1);
+}
+EXPORT_SYMBOL_GPL(kfree_call_rcu);
+
+/*
  * Wait for an rcu-preempt grace period, but make it happen quickly.
  * But because preemptible RCU does not exist, map to rcu-sched.
  */
@@ -2052,6 +2082,48 @@ int rcu_needs_cpu(int cpu)
 }
 
 /*
+ * Does the specified flavor of RCU have non-lazy callbacks pending on
+ * the specified CPU?  Both RCU flavor and CPU are specified by the
+ * rcu_data structure.
+ */
+static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
+{
+        return rdp->qlen != rdp->qlen_lazy;
+}
+
+#ifdef CONFIG_TREE_PREEMPT_RCU
+
+/*
+ * Are there non-lazy RCU-preempt callbacks?  (There cannot be if there
+ * is no RCU-preempt in the kernel.)
+ */
+static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
+{
+        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
+
+        return __rcu_cpu_has_nonlazy_callbacks(rdp);
+}
+
+#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+
+static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
+{
+        return 0;
+}
+
+#endif /* else #ifdef CONFIG_TREE_PREEMPT_RCU */
+
+/*
+ * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
+ */
+static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
+{
+        return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
+               __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
+               rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
+}
+
+/*
  * Timer handler used to force CPU to start pushing its remaining RCU
  * callbacks in the case where it entered dyntick-idle mode with callbacks
  * pending.  The handler doesn't really need to do anything because the
@@ -2149,8 +2221,9 @@ static void rcu_prepare_for_idle(int cpu)
                 trace_rcu_prep_idle("Dyntick with callbacks");
                 per_cpu(rcu_dyntick_drain, cpu) = 0;
                 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-                hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
-                              rcu_idle_gp_wait, HRTIMER_MODE_REL);
+                if (rcu_cpu_has_nonlazy_callbacks(cpu))
+                        hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
+                                      rcu_idle_gp_wait, HRTIMER_MODE_REL);
                 return; /* Nothing more to do immediately. */
         } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
                 /* We have hit the limit, so time to give up. */
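For context on the caller side (this diffstat is limited to
kernel/rcutree_plugin.h, so the header change is not shown): kfree_rcu()
reaches kfree_call_rcu() by encoding the offset of the rcu_head within
the enclosing structure as a fake callback-function pointer, which is
what lets RCU recognize the callback as lazy. Below is a hedged sketch
of that path, assuming the __kfree_rcu()/kfree_rcu() macro shape of this
kernel era; the exact text in include/linux/rcupdate.h may differ:

/* Sketch only; see include/linux/rcupdate.h for the real macros. */
#define __kfree_rcu(head, offset) \
        do { \
                BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
                kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
        } while (0)

#define kfree_rcu(ptr, rcu_head) \
        __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))

/*
 * Example user: the queued callback is counted as lazy, so an otherwise
 * idle CPU holding only such callbacks stays in dyntick-idle mode.
 */
struct foo {
        int data;
        struct rcu_head rcu;
};

static void foo_release(struct foo *fp)
{
        kfree_rcu(fp, rcu);
}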