diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-04 12:36:54 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-04 12:36:54 -0400 |
commit | 6832d9652f395f7d13003e3884942c40f52ac1fa (patch) | |
tree | 40555ad5eda9700cb973dac4db136ad97f5e8b19 /kernel/time | |
parent | 228abe73ad67665d71eacd6a8a347dd76b0115ae (diff) | |
parent | c2e7fcf53c3cb02b4ada1c66a9bc8a4d97d58aba (diff) |
Merge branch 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timers/nohz changes from Ingo Molnar:
"It mostly contains fixes and full dynticks off-case optimizations, by
Frederic Weisbecker"
* 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
nohz: Include local CPU in full dynticks global kick
nohz: Optimize full dynticks's sched hooks with static keys
nohz: Optimize full dynticks state checks with static keys
nohz: Rename a few state variables
vtime: Always debug check snapshot source _before_ updating it
vtime: Always scale generic vtime accounting results
vtime: Optimize full dynticks accounting off case with static keys
vtime: Describe overridden functions in dedicated arch headers
m68k: hardirq_count() only need preempt_mask.h
hardirq: Split preempt count mask definitions
context_tracking: Split low level state headers
vtime: Fix racy cputime delta update
vtime: Remove a few unneeded generic vtime state checks
context_tracking: User/kernel boundary cross trace events
context_tracking: Optimize context switch off case with static keys
context_tracking: Optimize guest APIs off case with static key
context_tracking: Optimize main APIs off case with static key
context_tracking: Ground setup for static key use
context_tracking: Remove full dynticks' hacky dependency on wide context tracking
nohz: Only enable context tracking on full dynticks CPUs
...
Diffstat (limited to 'kernel/time')
-rw-r--r-- | kernel/time/Kconfig | 1 | ||||
-rw-r--r-- | kernel/time/tick-sched.c | 61 |
2 files changed, 29 insertions, 33 deletions
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig index 3381f098070f..2b62fe86f9ec 100644 --- a/kernel/time/Kconfig +++ b/kernel/time/Kconfig | |||
@@ -105,7 +105,6 @@ config NO_HZ_FULL | |||
105 | select RCU_USER_QS | 105 | select RCU_USER_QS |
106 | select RCU_NOCB_CPU | 106 | select RCU_NOCB_CPU |
107 | select VIRT_CPU_ACCOUNTING_GEN | 107 | select VIRT_CPU_ACCOUNTING_GEN |
108 | select CONTEXT_TRACKING_FORCE | ||
109 | select IRQ_WORK | 108 | select IRQ_WORK |
110 | help | 109 | help |
111 | Adaptively try to shutdown the tick whenever possible, even when | 110 | Adaptively try to shutdown the tick whenever possible, even when |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index e8a1516cc0a3..3612fc77f834 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/irq_work.h> | 23 | #include <linux/irq_work.h> |
24 | #include <linux/posix-timers.h> | 24 | #include <linux/posix-timers.h> |
25 | #include <linux/perf_event.h> | 25 | #include <linux/perf_event.h> |
26 | #include <linux/context_tracking.h> | ||
26 | 27 | ||
27 | #include <asm/irq_regs.h> | 28 | #include <asm/irq_regs.h> |
28 | 29 | ||
@@ -148,8 +149,8 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) | |||
148 | } | 149 | } |
149 | 150 | ||
150 | #ifdef CONFIG_NO_HZ_FULL | 151 | #ifdef CONFIG_NO_HZ_FULL |
151 | static cpumask_var_t nohz_full_mask; | 152 | cpumask_var_t tick_nohz_full_mask; |
152 | bool have_nohz_full_mask; | 153 | bool tick_nohz_full_running; |
153 | 154 | ||
154 | static bool can_stop_full_tick(void) | 155 | static bool can_stop_full_tick(void) |
155 | { | 156 | { |
@@ -182,7 +183,7 @@ static bool can_stop_full_tick(void) | |||
182 | * Don't allow the user to think they can get | 183 | * Don't allow the user to think they can get |
183 | * full NO_HZ with this machine. | 184 | * full NO_HZ with this machine. |
184 | */ | 185 | */ |
185 | WARN_ONCE(have_nohz_full_mask, | 186 | WARN_ONCE(tick_nohz_full_running, |
186 | "NO_HZ FULL will not work with unstable sched clock"); | 187 | "NO_HZ FULL will not work with unstable sched clock"); |
187 | return false; | 188 | return false; |
188 | } | 189 | } |
@@ -197,7 +198,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now); | |||
197 | * Re-evaluate the need for the tick on the current CPU | 198 | * Re-evaluate the need for the tick on the current CPU |
198 | * and restart it if necessary. | 199 | * and restart it if necessary. |
199 | */ | 200 | */ |
200 | void tick_nohz_full_check(void) | 201 | void __tick_nohz_full_check(void) |
201 | { | 202 | { |
202 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | 203 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); |
203 | 204 | ||
@@ -211,7 +212,7 @@ void tick_nohz_full_check(void) | |||
211 | 212 | ||
212 | static void nohz_full_kick_work_func(struct irq_work *work) | 213 | static void nohz_full_kick_work_func(struct irq_work *work) |
213 | { | 214 | { |
214 | tick_nohz_full_check(); | 215 | __tick_nohz_full_check(); |
215 | } | 216 | } |
216 | 217 | ||
217 | static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { | 218 | static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { |
@@ -230,7 +231,7 @@ void tick_nohz_full_kick(void) | |||
230 | 231 | ||
231 | static void nohz_full_kick_ipi(void *info) | 232 | static void nohz_full_kick_ipi(void *info) |
232 | { | 233 | { |
233 | tick_nohz_full_check(); | 234 | __tick_nohz_full_check(); |
234 | } | 235 | } |
235 | 236 | ||
236 | /* | 237 | /* |
@@ -239,12 +240,13 @@ static void nohz_full_kick_ipi(void *info) | |||
239 | */ | 240 | */ |
240 | void tick_nohz_full_kick_all(void) | 241 | void tick_nohz_full_kick_all(void) |
241 | { | 242 | { |
242 | if (!have_nohz_full_mask) | 243 | if (!tick_nohz_full_running) |
243 | return; | 244 | return; |
244 | 245 | ||
245 | preempt_disable(); | 246 | preempt_disable(); |
246 | smp_call_function_many(nohz_full_mask, | 247 | smp_call_function_many(tick_nohz_full_mask, |
247 | nohz_full_kick_ipi, NULL, false); | 248 | nohz_full_kick_ipi, NULL, false); |
249 | tick_nohz_full_kick(); | ||
248 | preempt_enable(); | 250 | preempt_enable(); |
249 | } | 251 | } |
250 | 252 | ||
@@ -253,7 +255,7 @@ void tick_nohz_full_kick_all(void) | |||
253 | * It might need the tick due to per task/process properties: | 255 | * It might need the tick due to per task/process properties: |
254 | * perf events, posix cpu timers, ... | 256 | * perf events, posix cpu timers, ... |
255 | */ | 257 | */ |
256 | void tick_nohz_task_switch(struct task_struct *tsk) | 258 | void __tick_nohz_task_switch(struct task_struct *tsk) |
257 | { | 259 | { |
258 | unsigned long flags; | 260 | unsigned long flags; |
259 | 261 | ||
@@ -269,31 +271,23 @@ out: | |||
269 | local_irq_restore(flags); | 271 | local_irq_restore(flags); |
270 | } | 272 | } |
271 | 273 | ||
272 | int tick_nohz_full_cpu(int cpu) | ||
273 | { | ||
274 | if (!have_nohz_full_mask) | ||
275 | return 0; | ||
276 | |||
277 | return cpumask_test_cpu(cpu, nohz_full_mask); | ||
278 | } | ||
279 | |||
280 | /* Parse the boot-time nohz CPU list from the kernel parameters. */ | 274 | /* Parse the boot-time nohz CPU list from the kernel parameters. */ |
281 | static int __init tick_nohz_full_setup(char *str) | 275 | static int __init tick_nohz_full_setup(char *str) |
282 | { | 276 | { |
283 | int cpu; | 277 | int cpu; |
284 | 278 | ||
285 | alloc_bootmem_cpumask_var(&nohz_full_mask); | 279 | alloc_bootmem_cpumask_var(&tick_nohz_full_mask); |
286 | if (cpulist_parse(str, nohz_full_mask) < 0) { | 280 | if (cpulist_parse(str, tick_nohz_full_mask) < 0) { |
287 | pr_warning("NOHZ: Incorrect nohz_full cpumask\n"); | 281 | pr_warning("NOHZ: Incorrect nohz_full cpumask\n"); |
288 | return 1; | 282 | return 1; |
289 | } | 283 | } |
290 | 284 | ||
291 | cpu = smp_processor_id(); | 285 | cpu = smp_processor_id(); |
292 | if (cpumask_test_cpu(cpu, nohz_full_mask)) { | 286 | if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) { |
293 | pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu); | 287 | pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu); |
294 | cpumask_clear_cpu(cpu, nohz_full_mask); | 288 | cpumask_clear_cpu(cpu, tick_nohz_full_mask); |
295 | } | 289 | } |
296 | have_nohz_full_mask = true; | 290 | tick_nohz_full_running = true; |
297 | 291 | ||
298 | return 1; | 292 | return 1; |
299 | } | 293 | } |
@@ -311,7 +305,7 @@ static int tick_nohz_cpu_down_callback(struct notifier_block *nfb, | |||
311 | * If we handle the timekeeping duty for full dynticks CPUs, | 305 | * If we handle the timekeeping duty for full dynticks CPUs, |
312 | * we can't safely shutdown that CPU. | 306 | * we can't safely shutdown that CPU. |
313 | */ | 307 | */ |
314 | if (have_nohz_full_mask && tick_do_timer_cpu == cpu) | 308 | if (tick_nohz_full_running && tick_do_timer_cpu == cpu) |
315 | return NOTIFY_BAD; | 309 | return NOTIFY_BAD; |
316 | break; | 310 | break; |
317 | } | 311 | } |
@@ -330,31 +324,34 @@ static int tick_nohz_init_all(void) | |||
330 | int err = -1; | 324 | int err = -1; |
331 | 325 | ||
332 | #ifdef CONFIG_NO_HZ_FULL_ALL | 326 | #ifdef CONFIG_NO_HZ_FULL_ALL |
333 | if (!alloc_cpumask_var(&nohz_full_mask, GFP_KERNEL)) { | 327 | if (!alloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) { |
334 | pr_err("NO_HZ: Can't allocate full dynticks cpumask\n"); | 328 | pr_err("NO_HZ: Can't allocate full dynticks cpumask\n"); |
335 | return err; | 329 | return err; |
336 | } | 330 | } |
337 | err = 0; | 331 | err = 0; |
338 | cpumask_setall(nohz_full_mask); | 332 | cpumask_setall(tick_nohz_full_mask); |
339 | cpumask_clear_cpu(smp_processor_id(), nohz_full_mask); | 333 | cpumask_clear_cpu(smp_processor_id(), tick_nohz_full_mask); |
340 | have_nohz_full_mask = true; | 334 | tick_nohz_full_running = true; |
341 | #endif | 335 | #endif |
342 | return err; | 336 | return err; |
343 | } | 337 | } |
344 | 338 | ||
345 | void __init tick_nohz_init(void) | 339 | void __init tick_nohz_init(void) |
346 | { | 340 | { |
347 | if (!have_nohz_full_mask) { | 341 | int cpu; |
342 | |||
343 | if (!tick_nohz_full_running) { | ||
348 | if (tick_nohz_init_all() < 0) | 344 | if (tick_nohz_init_all() < 0) |
349 | return; | 345 | return; |
350 | } | 346 | } |
351 | 347 | ||
348 | for_each_cpu(cpu, tick_nohz_full_mask) | ||
349 | context_tracking_cpu_set(cpu); | ||
350 | |||
352 | cpu_notifier(tick_nohz_cpu_down_callback, 0); | 351 | cpu_notifier(tick_nohz_cpu_down_callback, 0); |
353 | cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), nohz_full_mask); | 352 | cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), tick_nohz_full_mask); |
354 | pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf); | 353 | pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf); |
355 | } | 354 | } |
356 | #else | ||
357 | #define have_nohz_full_mask (0) | ||
358 | #endif | 355 | #endif |
359 | 356 | ||
360 | /* | 357 | /* |
@@ -732,7 +729,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) | |||
732 | return false; | 729 | return false; |
733 | } | 730 | } |
734 | 731 | ||
735 | if (have_nohz_full_mask) { | 732 | if (tick_nohz_full_enabled()) { |
736 | /* | 733 | /* |
737 | * Keep the tick alive to guarantee timekeeping progression | 734 | * Keep the tick alive to guarantee timekeeping progression |
738 | * if there are full dynticks CPUs around | 735 | * if there are full dynticks CPUs around |