Diffstat (limited to 'kernel/rcutree_plugin.h')
 kernel/rcutree_plugin.h | 179 +++++++++++++++++++++++++++-----------------
 1 file changed, 99 insertions(+), 80 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 2411000d9869..3e4899459f3d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -153,7 +153,7 @@ static void rcu_preempt_qs(int cpu)
  *
  * Caller must disable preemption.
  */
-void rcu_preempt_note_context_switch(void)
+static void rcu_preempt_note_context_switch(int cpu)
 {
         struct task_struct *t = current;
         unsigned long flags;
@@ -164,7 +164,7 @@ void rcu_preempt_note_context_switch(void)
             (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
                 /* Possibly blocking in an RCU read-side critical section. */
-                rdp = __this_cpu_ptr(rcu_preempt_state.rda);
+                rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
                 rnp = rdp->mynode;
                 raw_spin_lock_irqsave(&rnp->lock, flags);
                 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -228,7 +228,7 @@ void rcu_preempt_note_context_switch(void)
          * means that we continue to block the current grace period.
          */
         local_irq_save(flags);
-        rcu_preempt_qs(smp_processor_id());
+        rcu_preempt_qs(cpu);
         local_irq_restore(flags);
 }
 
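The point of the new cpu parameter is that the caller already holds the CPU number, so the callee no longer recomputes it via smp_processor_id(). A minimal sketch of the presumed caller in kernel/rcutree.c (not part of this diff; details may differ):

    void rcu_note_context_switch(int cpu)
    {
            trace_rcu_utilization("Start context switch");
            rcu_sched_qs(cpu);                      /* Note a quiescent state. */
            rcu_preempt_note_context_switch(cpu);   /* Pass the CPU straight through. */
            trace_rcu_utilization("End context switch");
    }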
@@ -1002,6 +1002,14 @@ void rcu_force_quiescent_state(void)
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
 /*
+ * Because preemptible RCU does not exist, we never have to check for
+ * CPUs being in quiescent states.
+ */
+static void rcu_preempt_note_context_switch(int cpu)
+{
+}
+
+/*
  * Because preemptible RCU does not exist, there are never any preempted
  * RCU readers.
  */
@@ -1886,8 +1894,9 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
  * Because we not have RCU_FAST_NO_HZ, just check whether this CPU needs
  * any flavor of RCU.
  */
-int rcu_needs_cpu(int cpu)
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 {
+        *delta_jiffies = ULONG_MAX;
         return rcu_cpu_has_callbacks(cpu);
 }
 
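Here ULONG_MAX signals that RCU places no bound on how long the CPU may sleep. A hedged sketch of how a nohz tick-stop path might consume the new out-parameter (identifiers such as sleep_delta are illustrative, not the exact nohz code):

    unsigned long rcu_delta, timer_delta, sleep_delta;

    if (rcu_needs_cpu(cpu, &rcu_delta))
            rcu_delta = 0;          /* RCU needs the tick; don't stop it. */
    /* Sleep no longer than the sooner of the next timer and RCU's deadline. */
    timer_delta = get_next_timer_interrupt(jiffies) - jiffies;
    sleep_delta = min(rcu_delta, timer_delta);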
@@ -1962,41 +1971,6 @@ static void rcu_idle_count_callbacks_posted(void)
 #define RCU_IDLE_GP_DELAY 6             /* Roughly one grace period. */
 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */
 
-/* Loop counter for rcu_prepare_for_idle(). */
-static DEFINE_PER_CPU(int, rcu_dyntick_drain);
-/* If rcu_dyntick_holdoff==jiffies, don't try to enter dyntick-idle mode. */
-static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
-/* Timer to awaken the CPU if it enters dyntick-idle mode with callbacks. */
-static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);
-/* Scheduled expiry time for rcu_idle_gp_timer to allow reposting. */
-static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires);
-/* Enable special processing on first attempt to enter dyntick-idle mode. */
-static DEFINE_PER_CPU(bool, rcu_idle_first_pass);
-/* Running count of non-lazy callbacks posted, never decremented. */
-static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted);
-/* Snapshot of rcu_nonlazy_posted to detect meaningful exits from idle. */
-static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap);
-
-/*
- * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
- * callbacks on this CPU, (2) this CPU has not yet attempted to enter
- * dyntick-idle mode, or (3) this CPU is in the process of attempting to
- * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
- * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
- * it is better to incur scheduling-clock interrupts than to spin
- * continuously for the same time duration!
- */
-int rcu_needs_cpu(int cpu)
-{
-        /* Flag a new idle sojourn to the idle-entry state machine. */
-        per_cpu(rcu_idle_first_pass, cpu) = 1;
-        /* If no callbacks, RCU doesn't need the CPU. */
-        if (!rcu_cpu_has_callbacks(cpu))
-                return 0;
-        /* Otherwise, RCU needs the CPU only if it recently tried and failed. */
-        return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
-}
-
 /*
  * Does the specified flavor of RCU have non-lazy callbacks pending on
  * the specified CPU?  Both RCU flavor and CPU are specified by the
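The rdtp-> fields used by the new code below presumably land in struct rcu_dynticks via a companion change to kernel/rcutree.h that this diff does not show. Roughly, as a sketch (field comments paraphrased from the per-CPU variables deleted above):

    struct rcu_dynticks {
            /* ... existing dyntick-idle state ... */
    #ifdef CONFIG_RCU_FAST_NO_HZ
            int dyntick_drain;                   /* Prepare-for-idle state machine. */
            unsigned long dyntick_holdoff;       /* No dyntick-idle retry if == jiffies. */
            struct timer_list idle_gp_timer;     /* Wake CPU sleeping with callbacks. */
            unsigned long idle_gp_timer_expires; /* Expiry time, kept for reposting. */
            bool idle_first_pass;                /* First attempt to enter dyntick-idle? */
            unsigned long nonlazy_posted;        /* Non-lazy callbacks posted, cumulative. */
            unsigned long nonlazy_posted_snap;   /* Snapshot to detect real idle exits. */
    #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
    };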
@@ -2040,6 +2014,47 @@ static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
 }
 
 /*
+ * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
+ * callbacks on this CPU, (2) this CPU has not yet attempted to enter
+ * dyntick-idle mode, or (3) this CPU is in the process of attempting to
+ * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
+ * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
+ * it is better to incur scheduling-clock interrupts than to spin
+ * continuously for the same time duration!
+ *
+ * The delta_jiffies argument is used to store the time when RCU is
+ * going to need the CPU again if it still has callbacks.  The reason
+ * for this is that rcu_prepare_for_idle() might need to post a timer,
+ * but if so, it will do so after tick_nohz_stop_sched_tick() has set
+ * the wakeup time for this CPU.  This means that RCU's timer can be
+ * delayed until the wakeup time, which defeats the purpose of posting
+ * a timer.
+ */
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+{
+        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+        /* Flag a new idle sojourn to the idle-entry state machine. */
+        rdtp->idle_first_pass = 1;
+        /* If no callbacks, RCU doesn't need the CPU. */
+        if (!rcu_cpu_has_callbacks(cpu)) {
+                *delta_jiffies = ULONG_MAX;
+                return 0;
+        }
+        if (rdtp->dyntick_holdoff == jiffies) {
+                /* RCU recently tried and failed, so don't try again. */
+                *delta_jiffies = 1;
+                return 1;
+        }
+        /* Set up for the possibility that RCU will post a timer. */
+        if (rcu_cpu_has_nonlazy_callbacks(cpu))
+                *delta_jiffies = RCU_IDLE_GP_DELAY;
+        else
+                *delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
+        return 0;
+}
+
+/*
  * Handler for smp_call_function_single().  The only point of this
  * handler is to wake the CPU up, so the handler does only tracing.
  */
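Restated, the contract the new rcu_needs_cpu() offers its caller (a summary of the function added above, not additional kernel code):

    /*
     * no callbacks        -> 0, *delta_jiffies = ULONG_MAX (sleep indefinitely)
     * holdoff in effect   -> 1, *delta_jiffies = 1 (keep the tick, retry next jiffy)
     * non-lazy callbacks  -> 0, *delta_jiffies = RCU_IDLE_GP_DELAY (~1 grace period)
     * only lazy callbacks -> 0, *delta_jiffies = RCU_IDLE_LAZY_GP_DELAY (~6 seconds)
     */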
@@ -2075,21 +2090,24 @@ static void rcu_idle_gp_timer_func(unsigned long cpu_in)
  */
 static void rcu_prepare_for_idle_init(int cpu)
 {
-        per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-        setup_timer(&per_cpu(rcu_idle_gp_timer, cpu),
-                    rcu_idle_gp_timer_func, cpu);
-        per_cpu(rcu_idle_gp_timer_expires, cpu) = jiffies - 1;
-        per_cpu(rcu_idle_first_pass, cpu) = 1;
+        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+        rdtp->dyntick_holdoff = jiffies - 1;
+        setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
+        rdtp->idle_gp_timer_expires = jiffies - 1;
+        rdtp->idle_first_pass = 1;
 }
 
 /*
  * Clean up for exit from idle.  Because we are exiting from idle, there
- * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
+ * is no longer any point to ->idle_gp_timer, so cancel it.  This will
  * do nothing if this timer is not active, so just cancel it unconditionally.
  */
 static void rcu_cleanup_after_idle(int cpu)
 {
-        del_timer(&per_cpu(rcu_idle_gp_timer, cpu));
+        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+        del_timer(&rdtp->idle_gp_timer);
         trace_rcu_prep_idle("Cleanup after idle");
 }
 
@@ -2108,42 +2126,41 @@ static void rcu_cleanup_after_idle(int cpu)
  * Because it is not legal to invoke rcu_process_callbacks() with irqs
  * disabled, we do one pass of force_quiescent_state(), then do a
  * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
- * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
+ * later.  The ->dyntick_drain field controls the sequencing.
  *
  * The caller must have disabled interrupts.
  */
 static void rcu_prepare_for_idle(int cpu)
 {
         struct timer_list *tp;
+        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
         /*
          * If this is an idle re-entry, for example, due to use of
          * RCU_NONIDLE() or the new idle-loop tracing API within the idle
          * loop, then don't take any state-machine actions, unless the
          * momentary exit from idle queued additional non-lazy callbacks.
-         * Instead, repost the rcu_idle_gp_timer if this CPU has callbacks
+         * Instead, repost the ->idle_gp_timer if this CPU has callbacks
          * pending.
          */
-        if (!per_cpu(rcu_idle_first_pass, cpu) &&
-            (per_cpu(rcu_nonlazy_posted, cpu) ==
-             per_cpu(rcu_nonlazy_posted_snap, cpu))) {
+        if (!rdtp->idle_first_pass &&
+            (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
                 if (rcu_cpu_has_callbacks(cpu)) {
-                        tp = &per_cpu(rcu_idle_gp_timer, cpu);
-                        mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
+                        tp = &rdtp->idle_gp_timer;
+                        mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
                 }
                 return;
         }
-        per_cpu(rcu_idle_first_pass, cpu) = 0;
-        per_cpu(rcu_nonlazy_posted_snap, cpu) =
-                per_cpu(rcu_nonlazy_posted, cpu) - 1;
+        rdtp->idle_first_pass = 0;
+        rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;
 
         /*
          * If there are no callbacks on this CPU, enter dyntick-idle mode.
          * Also reset state to avoid prejudicing later attempts.
          */
         if (!rcu_cpu_has_callbacks(cpu)) {
-                per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-                per_cpu(rcu_dyntick_drain, cpu) = 0;
+                rdtp->dyntick_holdoff = jiffies - 1;
+                rdtp->dyntick_drain = 0;
                 trace_rcu_prep_idle("No callbacks");
                 return;
         }
@@ -2152,36 +2169,37 @@ static void rcu_prepare_for_idle(int cpu)
          * If in holdoff mode, just return.  We will presumably have
          * refrained from disabling the scheduling-clock tick.
          */
-        if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
+        if (rdtp->dyntick_holdoff == jiffies) {
                 trace_rcu_prep_idle("In holdoff");
                 return;
         }
 
-        /* Check and update the rcu_dyntick_drain sequencing. */
-        if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+        /* Check and update the ->dyntick_drain sequencing. */
+        if (rdtp->dyntick_drain <= 0) {
                 /* First time through, initialize the counter. */
-                per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
-        } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
+                rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
+        } else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
                    !rcu_pending(cpu) &&
                    !local_softirq_pending()) {
                 /* Can we go dyntick-idle despite still having callbacks? */
-                trace_rcu_prep_idle("Dyntick with callbacks");
-                per_cpu(rcu_dyntick_drain, cpu) = 0;
-                per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-                if (rcu_cpu_has_nonlazy_callbacks(cpu))
-                        per_cpu(rcu_idle_gp_timer_expires, cpu) =
+                rdtp->dyntick_drain = 0;
+                rdtp->dyntick_holdoff = jiffies;
+                if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
+                        trace_rcu_prep_idle("Dyntick with callbacks");
+                        rdtp->idle_gp_timer_expires =
                                 jiffies + RCU_IDLE_GP_DELAY;
-                else
-                        per_cpu(rcu_idle_gp_timer_expires, cpu) =
+                } else {
+                        rdtp->idle_gp_timer_expires =
                                 jiffies + RCU_IDLE_LAZY_GP_DELAY;
-                tp = &per_cpu(rcu_idle_gp_timer, cpu);
-                mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
-                per_cpu(rcu_nonlazy_posted_snap, cpu) =
-                        per_cpu(rcu_nonlazy_posted, cpu);
+                        trace_rcu_prep_idle("Dyntick with lazy callbacks");
+                }
+                tp = &rdtp->idle_gp_timer;
+                mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
+                rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
                 return; /* Nothing more to do immediately. */
-        } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+        } else if (--(rdtp->dyntick_drain) <= 0) {
                 /* We have hit the limit, so time to give up. */
-                per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
+                rdtp->dyntick_holdoff = jiffies;
                 trace_rcu_prep_idle("Begin holdoff");
                 invoke_rcu_core();  /* Force the CPU out of dyntick-idle. */
                 return;
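The ->dyntick_drain sequencing above, in brief (a paraphrase of the branches, assuming the RCU_IDLE_FLUSHES and RCU_IDLE_OPT_FLUSHES thresholds defined earlier in this file):

    /*
     * ->dyntick_drain <= 0:  fresh idle attempt; arm the counter with
     *         RCU_IDLE_FLUSHES passes.
     * ->dyntick_drain <= RCU_IDLE_OPT_FLUSHES, nothing pending:  enter
     *         dyntick-idle anyway, posting ->idle_gp_timer as a backstop
     *         (sooner for non-lazy callbacks, later for lazy-only).
     * counter exhausted:  give up, set ->dyntick_holdoff = jiffies, and
     *         kick callback processing via invoke_rcu_core().
     */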
@@ -2227,7 +2245,7 @@ static void rcu_prepare_for_idle(int cpu)
  */
 static void rcu_idle_count_callbacks_posted(void)
 {
-        __this_cpu_add(rcu_nonlazy_posted, 1);
+        __this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
 }
 
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
@@ -2238,11 +2256,12 @@ static void rcu_idle_count_callbacks_posted(void)
 
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
-        struct timer_list *tltp = &per_cpu(rcu_idle_gp_timer, cpu);
+        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+        struct timer_list *tltp = &rdtp->idle_gp_timer;
 
         sprintf(cp, "drain=%d %c timer=%lu",
-                per_cpu(rcu_dyntick_drain, cpu),
-                per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.',
+                rdtp->dyntick_drain,
+                rdtp->dyntick_holdoff == jiffies ? 'H' : '.',
                 timer_pending(tltp) ? tltp->expires - jiffies : -1);
 }
 
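For example, a stall-warning fragment of the form below (values illustrative) would mean: drain counter at zero, no holdoff in effect ('H' would indicate holdoff), and ->idle_gp_timer due in 14 jiffies; note that when no timer is pending, the -1 printed through %lu appears as a very large unsigned number:

    drain=0 . timer=14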