about summary refs log tree commit diff stats
path: root/kernel/rcutree_plugin.h
diff options
context:
space:
mode:
authorPaul E. McKenney <paul.mckenney@linaro.org>2012-05-09 15:07:05 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2012-06-06 23:43:28 -0400
commit5955f7eecd77d6b440db278b266cfecdb72ecd00 (patch)
treea69a23707da5595be22fa09738be572a4ef69b4c /kernel/rcutree_plugin.h
parentfd4b352687fd8604d49c190c4c9ea9e369fd42d5 (diff)
rcu: Move RCU_FAST_NO_HZ per-CPU variables to rcu_dynticks structure
The RCU_FAST_NO_HZ code relies on a number of per-CPU variables. This works, but is hidden from someone scanning the data structures in rcutree.h. This commit therefore converts these per-CPU variables to fields in the per-CPU rcu_dynticks structures.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Tested-by: Pascal Chapperon <pascal.chapperon@wanadoo.fr>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--kernel/rcutree_plugin.h99
1 file changed, 44 insertions(+), 55 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 5449f02c4820..6bd9637d5d83 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1962,21 +1962,6 @@ static void rcu_idle_count_callbacks_posted(void)
1962#define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */ 1962#define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */
1963#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */ 1963#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */
1964 1964
1965/* Loop counter for rcu_prepare_for_idle(). */
1966static DEFINE_PER_CPU(int, rcu_dyntick_drain);
1967/* If rcu_dyntick_holdoff==jiffies, don't try to enter dyntick-idle mode. */
1968static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
1969/* Timer to awaken the CPU if it enters dyntick-idle mode with callbacks. */
1970static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);
1971/* Scheduled expiry time for rcu_idle_gp_timer to allow reposting. */
1972static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires);
1973/* Enable special processing on first attempt to enter dyntick-idle mode. */
1974static DEFINE_PER_CPU(bool, rcu_idle_first_pass);
1975/* Running count of non-lazy callbacks posted, never decremented. */
1976static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted);
1977/* Snapshot of rcu_nonlazy_posted to detect meaningful exits from idle. */
1978static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap);
1979
1980/* 1965/*
1981 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no 1966 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
1982 * callbacks on this CPU, (2) this CPU has not yet attempted to enter 1967 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
@@ -1988,13 +1973,15 @@ static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap);
1988 */ 1973 */
1989int rcu_needs_cpu(int cpu) 1974int rcu_needs_cpu(int cpu)
1990{ 1975{
1976 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1977
1991 /* Flag a new idle sojourn to the idle-entry state machine. */ 1978 /* Flag a new idle sojourn to the idle-entry state machine. */
1992 per_cpu(rcu_idle_first_pass, cpu) = 1; 1979 rdtp->idle_first_pass = 1;
1993 /* If no callbacks, RCU doesn't need the CPU. */ 1980 /* If no callbacks, RCU doesn't need the CPU. */
1994 if (!rcu_cpu_has_callbacks(cpu)) 1981 if (!rcu_cpu_has_callbacks(cpu))
1995 return 0; 1982 return 0;
1996 /* Otherwise, RCU needs the CPU only if it recently tried and failed. */ 1983 /* Otherwise, RCU needs the CPU only if it recently tried and failed. */
1997 return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies; 1984 return rdtp->dyntick_holdoff == jiffies;
1998} 1985}
1999 1986
2000/* 1987/*
@@ -2075,21 +2062,24 @@ static void rcu_idle_gp_timer_func(unsigned long cpu_in)
2075 */ 2062 */
2076static void rcu_prepare_for_idle_init(int cpu) 2063static void rcu_prepare_for_idle_init(int cpu)
2077{ 2064{
2078 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; 2065 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2079 setup_timer(&per_cpu(rcu_idle_gp_timer, cpu), 2066
2080 rcu_idle_gp_timer_func, cpu); 2067 rdtp->dyntick_holdoff = jiffies - 1;
2081 per_cpu(rcu_idle_gp_timer_expires, cpu) = jiffies - 1; 2068 setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
2082 per_cpu(rcu_idle_first_pass, cpu) = 1; 2069 rdtp->idle_gp_timer_expires = jiffies - 1;
2070 rdtp->idle_first_pass = 1;
2083} 2071}
2084 2072
2085/* 2073/*
2086 * Clean up for exit from idle. Because we are exiting from idle, there 2074 * Clean up for exit from idle. Because we are exiting from idle, there
2087 * is no longer any point to rcu_idle_gp_timer, so cancel it. This will 2075 * is no longer any point to ->idle_gp_timer, so cancel it. This will
2088 * do nothing if this timer is not active, so just cancel it unconditionally. 2076 * do nothing if this timer is not active, so just cancel it unconditionally.
2089 */ 2077 */
2090static void rcu_cleanup_after_idle(int cpu) 2078static void rcu_cleanup_after_idle(int cpu)
2091{ 2079{
2092 del_timer(&per_cpu(rcu_idle_gp_timer, cpu)); 2080 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2081
2082 del_timer(&rdtp->idle_gp_timer);
2093 trace_rcu_prep_idle("Cleanup after idle"); 2083 trace_rcu_prep_idle("Cleanup after idle");
2094} 2084}
2095 2085
@@ -2108,42 +2098,41 @@ static void rcu_cleanup_after_idle(int cpu)
2108 * Because it is not legal to invoke rcu_process_callbacks() with irqs 2098 * Because it is not legal to invoke rcu_process_callbacks() with irqs
2109 * disabled, we do one pass of force_quiescent_state(), then do a 2099 * disabled, we do one pass of force_quiescent_state(), then do a
2110 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked 2100 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
2111 * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. 2101 * later. The ->dyntick_drain field controls the sequencing.
2112 * 2102 *
2113 * The caller must have disabled interrupts. 2103 * The caller must have disabled interrupts.
2114 */ 2104 */
2115static void rcu_prepare_for_idle(int cpu) 2105static void rcu_prepare_for_idle(int cpu)
2116{ 2106{
2117 struct timer_list *tp; 2107 struct timer_list *tp;
2108 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2118 2109
2119 /* 2110 /*
2120 * If this is an idle re-entry, for example, due to use of 2111 * If this is an idle re-entry, for example, due to use of
2121 * RCU_NONIDLE() or the new idle-loop tracing API within the idle 2112 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
2122 * loop, then don't take any state-machine actions, unless the 2113 * loop, then don't take any state-machine actions, unless the
2123 * momentary exit from idle queued additional non-lazy callbacks. 2114 * momentary exit from idle queued additional non-lazy callbacks.
2124 * Instead, repost the rcu_idle_gp_timer if this CPU has callbacks 2115 * Instead, repost the ->idle_gp_timer if this CPU has callbacks
2125 * pending. 2116 * pending.
2126 */ 2117 */
2127 if (!per_cpu(rcu_idle_first_pass, cpu) && 2118 if (!rdtp->idle_first_pass &&
2128 (per_cpu(rcu_nonlazy_posted, cpu) == 2119 (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
2129 per_cpu(rcu_nonlazy_posted_snap, cpu))) {
2130 if (rcu_cpu_has_callbacks(cpu)) { 2120 if (rcu_cpu_has_callbacks(cpu)) {
2131 tp = &per_cpu(rcu_idle_gp_timer, cpu); 2121 tp = &rdtp->idle_gp_timer;
2132 mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu)); 2122 mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
2133 } 2123 }
2134 return; 2124 return;
2135 } 2125 }
2136 per_cpu(rcu_idle_first_pass, cpu) = 0; 2126 rdtp->idle_first_pass = 0;
2137 per_cpu(rcu_nonlazy_posted_snap, cpu) = 2127 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;
2138 per_cpu(rcu_nonlazy_posted, cpu) - 1;
2139 2128
2140 /* 2129 /*
2141 * If there are no callbacks on this CPU, enter dyntick-idle mode. 2130 * If there are no callbacks on this CPU, enter dyntick-idle mode.
2142 * Also reset state to avoid prejudicing later attempts. 2131 * Also reset state to avoid prejudicing later attempts.
2143 */ 2132 */
2144 if (!rcu_cpu_has_callbacks(cpu)) { 2133 if (!rcu_cpu_has_callbacks(cpu)) {
2145 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; 2134 rdtp->dyntick_holdoff = jiffies - 1;
2146 per_cpu(rcu_dyntick_drain, cpu) = 0; 2135 rdtp->dyntick_drain = 0;
2147 trace_rcu_prep_idle("No callbacks"); 2136 trace_rcu_prep_idle("No callbacks");
2148 return; 2137 return;
2149 } 2138 }
@@ -2152,38 +2141,37 @@ static void rcu_prepare_for_idle(int cpu)
2152 * If in holdoff mode, just return. We will presumably have 2141 * If in holdoff mode, just return. We will presumably have
2153 * refrained from disabling the scheduling-clock tick. 2142 * refrained from disabling the scheduling-clock tick.
2154 */ 2143 */
2155 if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) { 2144 if (rdtp->dyntick_holdoff == jiffies) {
2156 trace_rcu_prep_idle("In holdoff"); 2145 trace_rcu_prep_idle("In holdoff");
2157 return; 2146 return;
2158 } 2147 }
2159 2148
2160 /* Check and update the rcu_dyntick_drain sequencing. */ 2149 /* Check and update the ->dyntick_drain sequencing. */
2161 if (per_cpu(rcu_dyntick_drain, cpu) <= 0) { 2150 if (rdtp->dyntick_drain <= 0) {
2162 /* First time through, initialize the counter. */ 2151 /* First time through, initialize the counter. */
2163 per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES; 2152 rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
2164 } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES && 2153 } else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
2165 !rcu_pending(cpu) && 2154 !rcu_pending(cpu) &&
2166 !local_softirq_pending()) { 2155 !local_softirq_pending()) {
2167 /* Can we go dyntick-idle despite still having callbacks? */ 2156 /* Can we go dyntick-idle despite still having callbacks? */
2168 per_cpu(rcu_dyntick_drain, cpu) = 0; 2157 rdtp->dyntick_drain = 0;
2169 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies; 2158 rdtp->dyntick_holdoff = jiffies;
2170 if (rcu_cpu_has_nonlazy_callbacks(cpu)) { 2159 if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
2171 trace_rcu_prep_idle("Dyntick with callbacks"); 2160 trace_rcu_prep_idle("Dyntick with callbacks");
2172 per_cpu(rcu_idle_gp_timer_expires, cpu) = 2161 rdtp->idle_gp_timer_expires =
2173 jiffies + RCU_IDLE_GP_DELAY; 2162 jiffies + RCU_IDLE_GP_DELAY;
2174 } else { 2163 } else {
2175 per_cpu(rcu_idle_gp_timer_expires, cpu) = 2164 rdtp->idle_gp_timer_expires =
2176 jiffies + RCU_IDLE_LAZY_GP_DELAY; 2165 jiffies + RCU_IDLE_LAZY_GP_DELAY;
2177 trace_rcu_prep_idle("Dyntick with lazy callbacks"); 2166 trace_rcu_prep_idle("Dyntick with lazy callbacks");
2178 } 2167 }
2179 tp = &per_cpu(rcu_idle_gp_timer, cpu); 2168 tp = &rdtp->idle_gp_timer;
2180 mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu)); 2169 mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
2181 per_cpu(rcu_nonlazy_posted_snap, cpu) = 2170 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
2182 per_cpu(rcu_nonlazy_posted, cpu);
2183 return; /* Nothing more to do immediately. */ 2171 return; /* Nothing more to do immediately. */
2184 } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) { 2172 } else if (--(rdtp->dyntick_drain) <= 0) {
2185 /* We have hit the limit, so time to give up. */ 2173 /* We have hit the limit, so time to give up. */
2186 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies; 2174 rdtp->dyntick_holdoff = jiffies;
2187 trace_rcu_prep_idle("Begin holdoff"); 2175 trace_rcu_prep_idle("Begin holdoff");
2188 invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */ 2176 invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
2189 return; 2177 return;
@@ -2229,7 +2217,7 @@ static void rcu_prepare_for_idle(int cpu)
2229 */ 2217 */
2230static void rcu_idle_count_callbacks_posted(void) 2218static void rcu_idle_count_callbacks_posted(void)
2231{ 2219{
2232 __this_cpu_add(rcu_nonlazy_posted, 1); 2220 __this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
2233} 2221}
2234 2222
2235#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ 2223#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
@@ -2240,11 +2228,12 @@ static void rcu_idle_count_callbacks_posted(void)
2240 2228
2241static void print_cpu_stall_fast_no_hz(char *cp, int cpu) 2229static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
2242{ 2230{
2243 struct timer_list *tltp = &per_cpu(rcu_idle_gp_timer, cpu); 2231 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2232 struct timer_list *tltp = &rdtp->idle_gp_timer;
2244 2233
2245 sprintf(cp, "drain=%d %c timer=%lu", 2234 sprintf(cp, "drain=%d %c timer=%lu",
2246 per_cpu(rcu_dyntick_drain, cpu), 2235 rdtp->dyntick_drain,
2247 per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.', 2236 rdtp->dyntick_holdoff == jiffies ? 'H' : '.',
2248 timer_pending(tltp) ? tltp->expires - jiffies : -1); 2237 timer_pending(tltp) ? tltp->expires - jiffies : -1);
2249} 2238}
2250 2239