diff options
| author | Thomas Gleixner <tglx@linutronix.de> | 2015-03-25 08:11:04 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2015-04-01 08:23:00 -0400 |
| commit | f46481d0a7cb942b84145acb80ad43bdb1ff8eb4 (patch) | |
| tree | 93ef806641010b07d8405b144203d1e862d034d7 /kernel/time | |
| parent | 080873ce2d1abd8c0a2b8c87bfa0762546a6b713 (diff) | |
tick/xen: Provide and use tick_suspend_local() and tick_resume_local()
Xen calls tick_resume() on every CPU, which is just wrong.
tick_resume() is for the syscore global suspend/resume
invocation. What Xen really wants is a per-CPU local resume
function.
Provide a tick_resume_local() function and use it in Xen.
Also provide a complementary tick_suspend_local() and modify
tick_unfreeze() and tick_freeze(), respectively, to use the
new local tick resume and suspend functions.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[ Combined two patches, rebased, modified subject/changelog. ]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1698741.eezk9tnXtG@vostro.rjw.lan
[ Merged to latest timers/core. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/time')
| -rw-r--r-- | kernel/time/tick-broadcast.c | 24 | ||||
| -rw-r--r-- | kernel/time/tick-common.c | 55 | ||||
| -rw-r--r-- | kernel/time/tick-internal.h | 8 |
3 files changed, 61 insertions, 26 deletions
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 60e6c23ce1c7..19cfb381faa9 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
| @@ -455,11 +455,26 @@ void tick_suspend_broadcast(void) | |||
| 455 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 455 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
| 456 | } | 456 | } |
| 457 | 457 | ||
| 458 | int tick_resume_broadcast(void) | 458 | /* |
| 459 | * This is called from tick_resume_local() on a resuming CPU. That's | ||
| 460 | * called from the core resume function, tick_unfreeze() and the magic XEN | ||
| 461 | * resume hackery. | ||
| 462 | * | ||
| 463 | * In none of these cases the broadcast device mode can change and the | ||
| 464 | * bit of the resuming CPU in the broadcast mask is safe as well. | ||
| 465 | */ | ||
| 466 | bool tick_resume_check_broadcast(void) | ||
| 467 | { | ||
| 468 | if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT) | ||
| 469 | return false; | ||
| 470 | else | ||
| 471 | return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask); | ||
| 472 | } | ||
| 473 | |||
| 474 | void tick_resume_broadcast(void) | ||
| 459 | { | 475 | { |
| 460 | struct clock_event_device *bc; | 476 | struct clock_event_device *bc; |
| 461 | unsigned long flags; | 477 | unsigned long flags; |
| 462 | int broadcast = 0; | ||
| 463 | 478 | ||
| 464 | raw_spin_lock_irqsave(&tick_broadcast_lock, flags); | 479 | raw_spin_lock_irqsave(&tick_broadcast_lock, flags); |
| 465 | 480 | ||
| @@ -472,8 +487,6 @@ int tick_resume_broadcast(void) | |||
| 472 | case TICKDEV_MODE_PERIODIC: | 487 | case TICKDEV_MODE_PERIODIC: |
| 473 | if (!cpumask_empty(tick_broadcast_mask)) | 488 | if (!cpumask_empty(tick_broadcast_mask)) |
| 474 | tick_broadcast_start_periodic(bc); | 489 | tick_broadcast_start_periodic(bc); |
| 475 | broadcast = cpumask_test_cpu(smp_processor_id(), | ||
| 476 | tick_broadcast_mask); | ||
| 477 | break; | 490 | break; |
| 478 | case TICKDEV_MODE_ONESHOT: | 491 | case TICKDEV_MODE_ONESHOT: |
| 479 | if (!cpumask_empty(tick_broadcast_mask)) | 492 | if (!cpumask_empty(tick_broadcast_mask)) |
| @@ -482,11 +495,8 @@ int tick_resume_broadcast(void) | |||
| 482 | } | 495 | } |
| 483 | } | 496 | } |
| 484 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 497 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
| 485 | |||
| 486 | return broadcast; | ||
| 487 | } | 498 | } |
| 488 | 499 | ||
| 489 | |||
| 490 | #ifdef CONFIG_TICK_ONESHOT | 500 | #ifdef CONFIG_TICK_ONESHOT |
| 491 | 501 | ||
| 492 | static cpumask_var_t tick_broadcast_oneshot_mask; | 502 | static cpumask_var_t tick_broadcast_oneshot_mask; |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 1a60c2ae96a8..da796d65d1fb 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
| @@ -374,40 +374,32 @@ void tick_shutdown(unsigned int *cpup) | |||
| 374 | } | 374 | } |
| 375 | 375 | ||
| 376 | /** | 376 | /** |
| 377 | * tick_suspend - Suspend the tick and the broadcast device | 377 | * tick_suspend_local - Suspend the local tick device |
| 378 | * | 378 | * |
| 379 | * Called from syscore_suspend() via timekeeping_suspend with only one | 379 | * Called from the local cpu for freeze with interrupts disabled. |
| 380 | * CPU online and interrupts disabled or from tick_unfreeze() under | ||
| 381 | * tick_freeze_lock. | ||
| 382 | * | 380 | * |
| 383 | * No locks required. Nothing can change the per cpu device. | 381 | * No locks required. Nothing can change the per cpu device. |
| 384 | */ | 382 | */ |
| 385 | void tick_suspend(void) | 383 | static void tick_suspend_local(void) |
| 386 | { | 384 | { |
| 387 | struct tick_device *td = this_cpu_ptr(&tick_cpu_device); | 385 | struct tick_device *td = this_cpu_ptr(&tick_cpu_device); |
| 388 | 386 | ||
| 389 | clockevents_shutdown(td->evtdev); | 387 | clockevents_shutdown(td->evtdev); |
| 390 | tick_suspend_broadcast(); | ||
| 391 | } | 388 | } |
| 392 | 389 | ||
| 393 | /** | 390 | /** |
| 394 | * tick_resume - Resume the tick and the broadcast device | 391 | * tick_resume_local - Resume the local tick device |
| 395 | * | 392 | * |
| 396 | * Called from syscore_resume() via timekeeping_resume with only one | 393 | * Called from the local CPU for unfreeze or XEN resume magic. |
| 397 | * CPU online and interrupts disabled or from tick_unfreeze() under | ||
| 398 | * tick_freeze_lock. | ||
| 399 | * | 394 | * |
| 400 | * No locks required. Nothing can change the per cpu device. | 395 | * No locks required. Nothing can change the per cpu device. |
| 401 | */ | 396 | */ |
| 402 | void tick_resume(void) | 397 | void tick_resume_local(void) |
| 403 | { | 398 | { |
| 404 | struct tick_device *td; | 399 | struct tick_device *td = this_cpu_ptr(&tick_cpu_device); |
| 405 | int broadcast; | 400 | bool broadcast = tick_resume_check_broadcast(); |
| 406 | 401 | ||
| 407 | broadcast = tick_resume_broadcast(); | ||
| 408 | td = this_cpu_ptr(&tick_cpu_device); | ||
| 409 | clockevents_tick_resume(td->evtdev); | 402 | clockevents_tick_resume(td->evtdev); |
| 410 | |||
| 411 | if (!broadcast) { | 403 | if (!broadcast) { |
| 412 | if (td->mode == TICKDEV_MODE_PERIODIC) | 404 | if (td->mode == TICKDEV_MODE_PERIODIC) |
| 413 | tick_setup_periodic(td->evtdev, 0); | 405 | tick_setup_periodic(td->evtdev, 0); |
| @@ -416,6 +408,35 @@ void tick_resume(void) | |||
| 416 | } | 408 | } |
| 417 | } | 409 | } |
| 418 | 410 | ||
| 411 | /** | ||
| 412 | * tick_suspend - Suspend the tick and the broadcast device | ||
| 413 | * | ||
| 414 | * Called from syscore_suspend() via timekeeping_suspend with only one | ||
| 415 | * CPU online and interrupts disabled or from tick_unfreeze() under | ||
| 416 | * tick_freeze_lock. | ||
| 417 | * | ||
| 418 | * No locks required. Nothing can change the per cpu device. | ||
| 419 | */ | ||
| 420 | void tick_suspend(void) | ||
| 421 | { | ||
| 422 | tick_suspend_local(); | ||
| 423 | tick_suspend_broadcast(); | ||
| 424 | } | ||
| 425 | |||
| 426 | /** | ||
| 427 | * tick_resume - Resume the tick and the broadcast device | ||
| 428 | * | ||
| 429 | * Called from syscore_resume() via timekeeping_resume with only one | ||
| 430 | * CPU online and interrupts disabled. | ||
| 431 | * | ||
| 432 | * No locks required. Nothing can change the per cpu device. | ||
| 433 | */ | ||
| 434 | void tick_resume(void) | ||
| 435 | { | ||
| 436 | tick_resume_broadcast(); | ||
| 437 | tick_resume_local(); | ||
| 438 | } | ||
| 439 | |||
| 419 | static DEFINE_RAW_SPINLOCK(tick_freeze_lock); | 440 | static DEFINE_RAW_SPINLOCK(tick_freeze_lock); |
| 420 | static unsigned int tick_freeze_depth; | 441 | static unsigned int tick_freeze_depth; |
| 421 | 442 | ||
| @@ -436,7 +457,7 @@ void tick_freeze(void) | |||
| 436 | if (tick_freeze_depth == num_online_cpus()) { | 457 | if (tick_freeze_depth == num_online_cpus()) { |
| 437 | timekeeping_suspend(); | 458 | timekeeping_suspend(); |
| 438 | } else { | 459 | } else { |
| 439 | tick_suspend(); | 460 | tick_suspend_local(); |
| 440 | } | 461 | } |
| 441 | 462 | ||
| 442 | raw_spin_unlock(&tick_freeze_lock); | 463 | raw_spin_unlock(&tick_freeze_lock); |
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 5c9f0eec56b2..6ba7bce732f2 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h | |||
| @@ -23,6 +23,7 @@ extern void tick_check_new_device(struct clock_event_device *dev); | |||
| 23 | extern void tick_handover_do_timer(int *cpup); | 23 | extern void tick_handover_do_timer(int *cpup); |
| 24 | extern void tick_shutdown(unsigned int *cpup); | 24 | extern void tick_shutdown(unsigned int *cpup); |
| 25 | extern void tick_suspend(void); | 25 | extern void tick_suspend(void); |
| 26 | extern void tick_resume(void); | ||
| 26 | extern bool tick_check_replacement(struct clock_event_device *curdev, | 27 | extern bool tick_check_replacement(struct clock_event_device *curdev, |
| 27 | struct clock_event_device *newdev); | 28 | struct clock_event_device *newdev); |
| 28 | extern void tick_install_replacement(struct clock_event_device *dev); | 29 | extern void tick_install_replacement(struct clock_event_device *dev); |
| @@ -43,6 +44,7 @@ extern int __clockevents_update_freq(struct clock_event_device *dev, u32 freq); | |||
| 43 | extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt); | 44 | extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt); |
| 44 | #else | 45 | #else |
| 45 | static inline void tick_suspend(void) { } | 46 | static inline void tick_suspend(void) { } |
| 47 | static inline void tick_resume(void) { } | ||
| 46 | #endif /* GENERIC_CLOCKEVENTS */ | 48 | #endif /* GENERIC_CLOCKEVENTS */ |
| 47 | 49 | ||
| 48 | /* Oneshot related functions */ | 50 | /* Oneshot related functions */ |
| @@ -81,7 +83,8 @@ extern int tick_is_broadcast_device(struct clock_event_device *dev); | |||
| 81 | extern void tick_broadcast_on_off(unsigned long reason, int *oncpu); | 83 | extern void tick_broadcast_on_off(unsigned long reason, int *oncpu); |
| 82 | extern void tick_shutdown_broadcast(unsigned int *cpup); | 84 | extern void tick_shutdown_broadcast(unsigned int *cpup); |
| 83 | extern void tick_suspend_broadcast(void); | 85 | extern void tick_suspend_broadcast(void); |
| 84 | extern int tick_resume_broadcast(void); | 86 | extern void tick_resume_broadcast(void); |
| 87 | extern bool tick_resume_check_broadcast(void); | ||
| 85 | extern void tick_broadcast_init(void); | 88 | extern void tick_broadcast_init(void); |
| 86 | extern void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast); | 89 | extern void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast); |
| 87 | extern int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq); | 90 | extern int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq); |
| @@ -95,7 +98,8 @@ static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { } | |||
| 95 | static inline void tick_broadcast_on_off(unsigned long reason, int *oncpu) { } | 98 | static inline void tick_broadcast_on_off(unsigned long reason, int *oncpu) { } |
| 96 | static inline void tick_shutdown_broadcast(unsigned int *cpup) { } | 99 | static inline void tick_shutdown_broadcast(unsigned int *cpup) { } |
| 97 | static inline void tick_suspend_broadcast(void) { } | 100 | static inline void tick_suspend_broadcast(void) { } |
| 98 | static inline int tick_resume_broadcast(void) { return 0; } | 101 | static inline void tick_resume_broadcast(void) { } |
| 102 | static inline bool tick_resume_check_broadcast(void) { return false; } | ||
| 99 | static inline void tick_broadcast_init(void) { } | 103 | static inline void tick_broadcast_init(void) { } |
| 100 | static inline int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq) { return -ENODEV; } | 104 | static inline int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq) { return -ENODEV; } |
| 101 | 105 | ||
