author		Paul E. McKenney <paul.mckenney@linaro.org>	2011-09-30 15:10:22 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-12-11 13:31:24 -0500
commit		9b2e4f1880b789be1f24f9684f7a54b90310b5c0 (patch)
tree		1fa922e0616e298837a7079cb49118188a58186c /include
parent		b804cb9e91c6c304959c69d4f9daeef4ffdba71c (diff)
rcu: Track idleness independent of idle tasks
Earlier versions of RCU used the scheduling-clock tick to detect idleness
by checking for the idle task, but handled idleness differently for
CONFIG_NO_HZ=y. But there are now a number of uses of RCU read-side
critical sections in the idle task, for example, for tracing. A more
fine-grained detection of idleness is therefore required.
This commit presses the old dyntick-idle code into full-time service,
so that rcu_idle_enter(), previously known as rcu_enter_nohz(), is
always invoked at the beginning of an idle loop iteration. Similarly,
rcu_idle_exit(), previously known as rcu_exit_nohz(), is always invoked
at the end of an idle-loop iteration. This allows the idle task to
use RCU everywhere except between consecutive rcu_idle_enter() and
rcu_idle_exit() calls, in turn allowing architecture maintainers to
specify exactly where in the idle loop RCU may be used.
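
As an illustration, an architecture's idle loop would be expected to bracket
its low-power wait roughly as in the following sketch (the loop shape and the
cpu_wait_for_interrupt() helper are placeholders for illustration, not part of
this patch):

	/*
	 * Illustrative only: how an architecture's idle loop might bracket
	 * its low-power wait.  cpu_wait_for_interrupt() stands in for the
	 * arch-specific idle instruction; it is not a real kernel function.
	 */
	static void example_cpu_idle(void)
	{
		while (1) {
			while (!need_resched()) {
				rcu_idle_enter();  /* RCU must not be used after this... */
				cpu_wait_for_interrupt();
				rcu_idle_exit();   /* ...and may be used again from here on. */
			}
			schedule();
		}
	}

Tracing and other RCU read-side critical sections in the idle task then simply
need to sit outside the bracketed window.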
Because some userspace upcalls can result in what looks
to RCU like half of an interrupt, it is not possible to expect that
the irq_enter() and irq_exit() hooks will give exact counts. This
patch therefore expands the ->dynticks_nesting counter to 64 bits
and uses two separate bitfields to count process/idle transitions
and interrupt entry/exit transitions. It is presumed that userspace
upcalls do not happen in the idle loop or from usermode execution
(though usermode might do a system call that results in an upcall).
The counter is hard-reset on each process/idle transition, which
avoids the interrupt entry/exit error from accumulating. Overflow
is avoided by the 64-bitness of the ->dynticks_nesting counter.
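Conceptually, the split counter behaves something like the following userspace
model (the DYNTICK_TASK_BIAS constant, the function names, and the lack of
per-CPU and atomic handling are simplifications for illustration, not the
patch's actual code):

	#include <limits.h>
	#include <stdbool.h>

	/*
	 * Rough model of the split ->dynticks_nesting counter: process/idle
	 * transitions hard-reset the value, while irq entry/exit only adjust
	 * it by 1.  Because the task-level bias is enormous, a small miscount
	 * from "half interrupts" can never make a non-idle CPU look idle, and
	 * each hard reset discards whatever irq-side error has accumulated.
	 */
	#define DYNTICK_TASK_BIAS	(LLONG_MAX / 2)	/* illustrative value */

	static long long dynticks_nesting;	/* per-CPU in the real code */

	static void model_idle_enter(void) { dynticks_nesting = 0; }
	static void model_idle_exit(void)  { dynticks_nesting = DYNTICK_TASK_BIAS; }
	static void model_irq_enter(void)  { dynticks_nesting++; }
	static void model_irq_exit(void)   { dynticks_nesting--; }

	static bool model_cpu_is_idle(void)
	{
		return dynticks_nesting == 0;
	}

	int main(void)
	{
		model_idle_exit();		/* CPU running a task: non-idle */
		model_irq_exit();		/* unmatched exit from an upcall... */
		model_irq_exit();		/* ...and another */
		return model_cpu_is_idle();	/* still correctly non-idle (returns 0) */
	}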
This commit also adds warnings if a non-idle task asks RCU to enter
idle state (and these checks will need some adjustment before applying
Frederic's OS-jitter patches, http://lkml.org/lkml/2011/10/7/246).
In addition, validation of ->dynticks and ->dynticks_nesting is added.
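Those checks are roughly of the following flavor (a sketch only; the exact
conditions, helper functions, and field layout in the patch may differ):

	/*
	 * Sketch of the kind of sanity checks added; not the verbatim patch
	 * code.  rdtp points at the per-CPU dyntick-idle state.
	 */
	static void sketch_idle_entry_checks(struct rcu_dynticks *rdtp)
	{
		/* Only the idle loop should tell RCU that this CPU is idle. */
		WARN_ON_ONCE(!is_idle_task(current));

		/* ->dynticks is even when idle, odd otherwise; must still be odd here. */
		WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));

		/* ->dynticks_nesting must still show a non-idle nesting level. */
		WARN_ON_ONCE(rdtp->dynticks_nesting <= 0);
	}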
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Diffstat (limited to 'include')
-rw-r--r--	include/linux/hardirq.h		21
-rw-r--r--	include/linux/rcupdate.h	21
-rw-r--r--	include/linux/tick.h		11
-rw-r--r--	include/trace/events/rcu.h	10
4 files changed, 19 insertions, 44 deletions
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f743883f769e..bb7f30971858 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -139,20 +139,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
 extern void account_system_vtime(struct task_struct *tsk);
 #endif
 
-#if defined(CONFIG_NO_HZ)
 #if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-
-static inline void rcu_irq_enter(void)
-{
-	rcu_exit_nohz();
-}
-
-static inline void rcu_irq_exit(void)
-{
-	rcu_enter_nohz();
-}
 
 static inline void rcu_nmi_enter(void)
 {
@@ -163,17 +150,9 @@ static inline void rcu_nmi_exit(void)
 }
 
 #else
-extern void rcu_irq_enter(void);
-extern void rcu_irq_exit(void);
 extern void rcu_nmi_enter(void);
 extern void rcu_nmi_exit(void);
 #endif
-#else
-# define rcu_irq_enter() do { } while (0)
-# define rcu_irq_exit() do { } while (0)
-# define rcu_nmi_enter() do { } while (0)
-# define rcu_nmi_exit() do { } while (0)
-#endif /* #if defined(CONFIG_NO_HZ) */
 
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 2cf4226ade7e..cd1ad4b04c6d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -177,23 +177,10 @@ extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
 extern void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
-
-#ifdef CONFIG_NO_HZ
-
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-
-#else /* #ifdef CONFIG_NO_HZ */
-
-static inline void rcu_enter_nohz(void)
-{
-}
-
-static inline void rcu_exit_nohz(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_NO_HZ */
+extern void rcu_idle_enter(void);
+extern void rcu_idle_exit(void);
+extern void rcu_irq_enter(void);
+extern void rcu_irq_exit(void);
 
 /*
  * Infrastructure to implement the synchronize_() primitives in
diff --git a/include/linux/tick.h b/include/linux/tick.h
index b232ccc0ee29..ca40838fdfb7 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -127,8 +127,15 @@ extern ktime_t tick_nohz_get_sleep_length(void);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
 extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
 # else
-static inline void tick_nohz_stop_sched_tick(int inidle) { }
-static inline void tick_nohz_restart_sched_tick(void) { }
+static inline void tick_nohz_stop_sched_tick(int inidle)
+{
+	if (inidle)
+		rcu_idle_enter();
+}
+static inline void tick_nohz_restart_sched_tick(void)
+{
+	rcu_idle_exit();
+}
 static inline ktime_t tick_nohz_get_sleep_length(void)
 {
 	ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 669fbd62ec25..e5771804c507 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -246,19 +246,21 @@ TRACE_EVENT(rcu_fqs,
  */
 TRACE_EVENT(rcu_dyntick,
 
-	TP_PROTO(char *polarity),
+	TP_PROTO(char *polarity, int nesting),
 
-	TP_ARGS(polarity),
+	TP_ARGS(polarity, nesting),
 
 	TP_STRUCT__entry(
 		__field(char *, polarity)
+		__field(int, nesting)
 	),
 
 	TP_fast_assign(
 		__entry->polarity = polarity;
+		__entry->nesting = nesting;
 	),
 
-	TP_printk("%s", __entry->polarity)
+	TP_printk("%s %d", __entry->polarity, __entry->nesting)
 );
 
 /*
@@ -443,7 +445,7 @@ TRACE_EVENT(rcu_batch_end,
 #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
 #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0)
 #define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
-#define trace_rcu_dyntick(polarity) do { } while (0)
+#define trace_rcu_dyntick(polarity, nesting) do { } while (0)
 #define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
 #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
 #define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)