path: root/include/linux/tick.h
author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2015-02-13 17:50:43 -0500
committer	Rafael J. Wysocki <rjw@rjwysocki.net>	2015-02-15 13:40:09 -0500
commit		124cf9117c5f93cc5b324530b7e105b09c729d5d (patch)
tree		e3416dc59e678015f41bd9b70dc1a8cc6145ca80 /include/linux/tick.h
parent		060407aed56c00960c9b5f70f5d19b2823adffd7 (diff)
PM / sleep: Make it possible to quiesce timers during suspend-to-idle
The efficiency of suspend-to-idle depends on being able to keep CPUs in the deepest available idle states for as much time as possible. Ideally, they should only be brought out of idle by system wakeup interrupts.

However, timer interrupts occurring periodically prevent that from happening and it is not practical to chase all of the "misbehaving" timers in a whack-a-mole fashion. A much more effective approach is to suspend the local ticks for all CPUs and the entire timekeeping along the lines of what is done during full suspend, which also helps to keep suspend-to-idle and full suspend reasonably similar.

The idea is to suspend the local tick on each CPU executing cpuidle_enter_freeze() and to make the last of them suspend the entire timekeeping. That should prevent timer interrupts from triggering until an IO interrupt wakes up one of the CPUs. It needs to be done with interrupts disabled on all of the CPUs, though, because otherwise the suspended clocksource might be accessed by an interrupt handler which might lead to fatal consequences.

Unfortunately, the existing ->enter callbacks provided by cpuidle drivers generally cannot be used for implementing that, because some of them re-enable interrupts temporarily and some idle entry methods cause interrupts to be re-enabled automatically on exit. Also some of these callbacks manipulate local clock event devices of the CPUs, which really shouldn't be done after suspending their ticks.

To overcome that difficulty, introduce a new cpuidle state callback, ->enter_freeze, that will be guaranteed (1) to keep interrupts disabled all the time (and return with interrupts disabled) and (2) not to touch the CPU timer devices. Modify cpuidle_enter_freeze() to look for the deepest available idle state with ->enter_freeze present and to make the CPU execute that callback with suspended tick (and the last of the online CPUs to execute it with suspended timekeeping).

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
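For context, a rough sketch of the "last CPU suspends timekeeping" bookkeeping that the new tick_freeze()/tick_unfreeze() declarations below imply. This is illustrative only, not the actual kernel/time implementation; the lock, counter, and the local suspend/resume helper names are assumptions. Both functions are expected to be called with interrupts disabled, per the commit message.

/*
 * Illustrative sketch: count how many CPUs have frozen their tick and let
 * the last one suspend timekeeping as well.  Called with interrupts off
 * from the suspend-to-idle (freeze) path.
 */
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/timekeeping.h>

static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
static unsigned int tick_freeze_depth;

void tick_freeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	tick_freeze_depth++;
	if (tick_freeze_depth == num_online_cpus())
		timekeeping_suspend();		/* last CPU in: stop timekeeping too */
	else
		tick_suspend_local_sketch();	/* hypothetical: suspend only this CPU's tick */

	raw_spin_unlock(&tick_freeze_lock);
}

void tick_unfreeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	if (tick_freeze_depth == num_online_cpus())
		timekeeping_resume();		/* first CPU out: restore timekeeping */
	else
		tick_resume_local_sketch();	/* hypothetical: resume only this CPU's tick */

	tick_freeze_depth--;

	raw_spin_unlock(&tick_freeze_lock);
}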
Diffstat (limited to 'include/linux/tick.h')
-rw-r--r--	include/linux/tick.h	6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/include/linux/tick.h b/include/linux/tick.h
index eda850ca757a..9c085dc12ae9 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -79,6 +79,9 @@ extern void __init tick_init(void);
 extern int tick_is_oneshot_available(void);
 extern struct tick_device *tick_get_device(int cpu);
 
+extern void tick_freeze(void);
+extern void tick_unfreeze(void);
+
 # ifdef CONFIG_HIGH_RES_TIMERS
 extern int tick_init_highres(void);
 extern int tick_program_event(ktime_t expires, int force);
@@ -119,6 +122,8 @@ static inline int tick_oneshot_mode_active(void) { return 0; }
 
 #else /* CONFIG_GENERIC_CLOCKEVENTS */
 static inline void tick_init(void) { }
+static inline void tick_freeze(void) { }
+static inline void tick_unfreeze(void) { }
 static inline void tick_cancel_sched_timer(int cpu) { }
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
@@ -226,5 +231,4 @@ static inline void tick_nohz_task_switch(struct task_struct *tsk)
 	__tick_nohz_task_switch(tsk);
 }
 
-
 #endif
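The consumer of these helpers lives on the cpuidle side rather than in this header. A hedged sketch of how cpuidle_enter_freeze() can combine the new tick helpers with the ->enter_freeze callback described in the commit message; find_deepest_freeze_state() and cpuidle_enter_freeze_sketch() are illustrative names, not the functions actually added to drivers/cpuidle/cpuidle.c.

/*
 * Illustrative sketch, not the real cpuidle code: pick the deepest state
 * that provides ->enter_freeze, freeze the tick around the callback, and
 * rely on the callback keeping interrupts disabled the whole time.
 */
#include <linux/cpuidle.h>
#include <linux/tick.h>

static int find_deepest_freeze_state(struct cpuidle_driver *drv)
{
	int i;

	/* Walk from the deepest state down; use the first one that can freeze. */
	for (i = drv->state_count - 1; i >= 0; i--)
		if (drv->states[i].enter_freeze)
			return i;

	return -1;
}

static void cpuidle_enter_freeze_sketch(struct cpuidle_driver *drv,
					struct cpuidle_device *dev)
{
	int index = find_deepest_freeze_state(drv);

	if (index < 0)
		return;	/* no suitable state: fall back to the regular idle path */

	/*
	 * Suspend the local tick; the last online CPU to get here suspends
	 * timekeeping as well.  Interrupts stay disabled across the callback,
	 * which is exactly the guarantee ->enter_freeze provides.
	 */
	tick_freeze();
	drv->states[index].enter_freeze(dev, drv, index);
	tick_unfreeze();
}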