diff options
| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-09 06:30:57 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-09 06:30:57 -0400 |
| commit | afa3536be88b435a057cb727b48fd3d760a497d2 (patch) | |
| tree | 8562d3c8327286746ae835ef8eb39d4494a1054d /kernel | |
| parent | 35a9ad8af0bb0fa3525e6d0d20e32551d226f38e (diff) | |
| parent | 9b01f5bf3999a3db5b1bbd9fdfd80d8d304e94ee (diff) | |
Merge branch 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer fixes from Ingo Molnar:
"Main changes:
- Fix the deadlock reported by Dave Jones et al
- Clean up and fix nohz_full interaction with arch abilities
- nohz init code consolidation/cleanup"
* 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
nohz: nohz full depends on irq work self IPI support
nohz: Consolidate nohz full init code
arm64: Tell irq work about self IPI support
arm: Tell irq work about self IPI support
x86: Tell irq work about self IPI support
irq_work: Force raised irq work to run on irq work interrupt
irq_work: Introduce arch_irq_work_has_interrupt()
nohz: Move nohz full init call to tick init
Diffstat (limited to 'kernel')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/irq_work.c | 15 |
| -rw-r--r-- | kernel/time/tick-common.c | 1 |
| -rw-r--r-- | kernel/time/tick-internal.h | 7 |
| -rw-r--r-- | kernel/time/tick-sched.c | 52 |
| -rw-r--r-- | kernel/time/timer.c | 2 |

5 files changed, 55 insertions(+), 22 deletions(-)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c index e6bcbe756663..385b85aded19 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c | |||
| @@ -115,8 +115,10 @@ bool irq_work_needs_cpu(void) | |||
| 115 | 115 | ||
| 116 | raised = &__get_cpu_var(raised_list); | 116 | raised = &__get_cpu_var(raised_list); |
| 117 | lazy = &__get_cpu_var(lazy_list); | 117 | lazy = &__get_cpu_var(lazy_list); |
| 118 | if (llist_empty(raised) && llist_empty(lazy)) | 118 | |
| 119 | return false; | 119 | if (llist_empty(raised) || arch_irq_work_has_interrupt()) |
| 120 | if (llist_empty(lazy)) | ||
| 121 | return false; | ||
| 120 | 122 | ||
| 121 | /* All work should have been flushed before going offline */ | 123 | /* All work should have been flushed before going offline */ |
| 122 | WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); | 124 | WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); |
| @@ -171,6 +173,15 @@ void irq_work_run(void) | |||
| 171 | } | 173 | } |
| 172 | EXPORT_SYMBOL_GPL(irq_work_run); | 174 | EXPORT_SYMBOL_GPL(irq_work_run); |
| 173 | 175 | ||
| 176 | void irq_work_tick(void) | ||
| 177 | { | ||
| 178 | struct llist_head *raised = &__get_cpu_var(raised_list); | ||
| 179 | |||
| 180 | if (!llist_empty(raised) && !arch_irq_work_has_interrupt()) | ||
| 181 | irq_work_run_list(raised); | ||
| 182 | irq_work_run_list(&__get_cpu_var(lazy_list)); | ||
| 183 | } | ||
| 184 | |||
| 174 | /* | 185 | /* |
| 175 | * Synchronize against the irq_work @entry, ensures the entry is not | 186 | * Synchronize against the irq_work @entry, ensures the entry is not |
| 176 | * currently in use. | 187 | * currently in use. |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 0a0608edeb26..052b4b53c3d6 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
| @@ -400,4 +400,5 @@ void tick_resume(void) | |||
| 400 | void __init tick_init(void) | 400 | void __init tick_init(void) |
| 401 | { | 401 | { |
| 402 | tick_broadcast_init(); | 402 | tick_broadcast_init(); |
| 403 | tick_nohz_init(); | ||
| 403 | } | 404 | } |
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index c19c1d84b6f3..366aeb4f2c66 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h | |||
| @@ -99,6 +99,13 @@ static inline int tick_broadcast_oneshot_active(void) { return 0; } | |||
| 99 | static inline bool tick_broadcast_oneshot_available(void) { return false; } | 99 | static inline bool tick_broadcast_oneshot_available(void) { return false; } |
| 100 | #endif /* !TICK_ONESHOT */ | 100 | #endif /* !TICK_ONESHOT */ |
| 101 | 101 | ||
| 102 | /* NO_HZ_FULL internal */ | ||
| 103 | #ifdef CONFIG_NO_HZ_FULL | ||
| 104 | extern void tick_nohz_init(void); | ||
| 105 | # else | ||
| 106 | static inline void tick_nohz_init(void) { } | ||
| 107 | #endif | ||
| 108 | |||
| 102 | /* | 109 | /* |
| 103 | * Broadcasting support | 110 | * Broadcasting support |
| 104 | */ | 111 | */ |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index f654a8a298fa..5a9ff243588c 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
| @@ -295,22 +295,12 @@ out: | |||
| 295 | /* Parse the boot-time nohz CPU list from the kernel parameters. */ | 295 | /* Parse the boot-time nohz CPU list from the kernel parameters. */ |
| 296 | static int __init tick_nohz_full_setup(char *str) | 296 | static int __init tick_nohz_full_setup(char *str) |
| 297 | { | 297 | { |
| 298 | int cpu; | ||
| 299 | |||
| 300 | alloc_bootmem_cpumask_var(&tick_nohz_full_mask); | 298 | alloc_bootmem_cpumask_var(&tick_nohz_full_mask); |
| 301 | alloc_bootmem_cpumask_var(&housekeeping_mask); | ||
| 302 | if (cpulist_parse(str, tick_nohz_full_mask) < 0) { | 299 | if (cpulist_parse(str, tick_nohz_full_mask) < 0) { |
| 303 | pr_warning("NOHZ: Incorrect nohz_full cpumask\n"); | 300 | pr_warning("NOHZ: Incorrect nohz_full cpumask\n"); |
| 301 | free_bootmem_cpumask_var(tick_nohz_full_mask); | ||
| 304 | return 1; | 302 | return 1; |
| 305 | } | 303 | } |
| 306 | |||
| 307 | cpu = smp_processor_id(); | ||
| 308 | if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) { | ||
| 309 | pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu); | ||
| 310 | cpumask_clear_cpu(cpu, tick_nohz_full_mask); | ||
| 311 | } | ||
| 312 | cpumask_andnot(housekeeping_mask, | ||
| 313 | cpu_possible_mask, tick_nohz_full_mask); | ||
| 314 | tick_nohz_full_running = true; | 304 | tick_nohz_full_running = true; |
| 315 | 305 | ||
| 316 | return 1; | 306 | return 1; |
| @@ -349,18 +339,11 @@ static int tick_nohz_init_all(void) | |||
| 349 | 339 | ||
| 350 | #ifdef CONFIG_NO_HZ_FULL_ALL | 340 | #ifdef CONFIG_NO_HZ_FULL_ALL |
| 351 | if (!alloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) { | 341 | if (!alloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) { |
| 352 | pr_err("NO_HZ: Can't allocate full dynticks cpumask\n"); | 342 | WARN(1, "NO_HZ: Can't allocate full dynticks cpumask\n"); |
| 353 | return err; | ||
| 354 | } | ||
| 355 | if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) { | ||
| 356 | pr_err("NO_HZ: Can't allocate not-full dynticks cpumask\n"); | ||
| 357 | return err; | 343 | return err; |
| 358 | } | 344 | } |
| 359 | err = 0; | 345 | err = 0; |
| 360 | cpumask_setall(tick_nohz_full_mask); | 346 | cpumask_setall(tick_nohz_full_mask); |
| 361 | cpumask_clear_cpu(smp_processor_id(), tick_nohz_full_mask); | ||
| 362 | cpumask_clear(housekeeping_mask); | ||
| 363 | cpumask_set_cpu(smp_processor_id(), housekeeping_mask); | ||
| 364 | tick_nohz_full_running = true; | 347 | tick_nohz_full_running = true; |
| 365 | #endif | 348 | #endif |
| 366 | return err; | 349 | return err; |
| @@ -375,6 +358,37 @@ void __init tick_nohz_init(void) | |||
| 375 | return; | 358 | return; |
| 376 | } | 359 | } |
| 377 | 360 | ||
| 361 | if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) { | ||
| 362 | WARN(1, "NO_HZ: Can't allocate not-full dynticks cpumask\n"); | ||
| 363 | cpumask_clear(tick_nohz_full_mask); | ||
| 364 | tick_nohz_full_running = false; | ||
| 365 | return; | ||
| 366 | } | ||
| 367 | |||
| 368 | /* | ||
| 369 | * Full dynticks uses irq work to drive the tick rescheduling on safe | ||
| 370 | * locking contexts. But then we need irq work to raise its own | ||
| 371 | * interrupts to avoid circular dependency on the tick | ||
| 372 | */ | ||
| 373 | if (!arch_irq_work_has_interrupt()) { | ||
| 374 | pr_warning("NO_HZ: Can't run full dynticks because arch doesn't " | ||
| 375 | "support irq work self-IPIs\n"); | ||
| 376 | cpumask_clear(tick_nohz_full_mask); | ||
| 377 | cpumask_copy(housekeeping_mask, cpu_possible_mask); | ||
| 378 | tick_nohz_full_running = false; | ||
| 379 | return; | ||
| 380 | } | ||
| 381 | |||
| 382 | cpu = smp_processor_id(); | ||
| 383 | |||
| 384 | if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) { | ||
| 385 | pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu); | ||
| 386 | cpumask_clear_cpu(cpu, tick_nohz_full_mask); | ||
| 387 | } | ||
| 388 | |||
| 389 | cpumask_andnot(housekeeping_mask, | ||
| 390 | cpu_possible_mask, tick_nohz_full_mask); | ||
| 391 | |||
| 378 | for_each_cpu(cpu, tick_nohz_full_mask) | 392 | for_each_cpu(cpu, tick_nohz_full_mask) |
| 379 | context_tracking_cpu_set(cpu); | 393 | context_tracking_cpu_set(cpu); |
| 380 | 394 | ||
diff --git a/kernel/time/timer.c b/kernel/time/timer.c index aca5dfe2fa3d..9bbb8344ed3b 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c | |||
| @@ -1385,7 +1385,7 @@ void update_process_times(int user_tick) | |||
| 1385 | rcu_check_callbacks(cpu, user_tick); | 1385 | rcu_check_callbacks(cpu, user_tick); |
| 1386 | #ifdef CONFIG_IRQ_WORK | 1386 | #ifdef CONFIG_IRQ_WORK |
| 1387 | if (in_irq()) | 1387 | if (in_irq()) |
| 1388 | irq_work_run(); | 1388 | irq_work_tick(); |
| 1389 | #endif | 1389 | #endif |
| 1390 | scheduler_tick(); | 1390 | scheduler_tick(); |
| 1391 | run_posix_cpu_timers(p); | 1391 | run_posix_cpu_timers(p); |
