summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorRafael J. Wysocki <rafael.j.wysocki@intel.com>2018-04-05 12:58:27 -0400
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2018-04-05 12:58:47 -0400
commit0e7767687fdabfc58d5046e7488632bf2ecd4d0c (patch)
tree09a873c34178029672985ea9160d4542b1352f27
parentf2d285669aae656dfeafa0bf25e86bbbc5d22329 (diff)
time: tick-sched: Reorganize idle tick management code
Prepare the scheduler tick code for reworking the idle loop to avoid stopping the tick in some cases. The idea is to split the nohz idle entry call to decouple the idle time stats accounting and preparatory work from the actual tick stop code, in order to later be able to delay the tick stop once we reach more power-knowledgeable callers. Move away the tick_nohz_start_idle() invocation from __tick_nohz_idle_enter(), rename the latter to __tick_nohz_idle_stop_tick() and define tick_nohz_idle_stop_tick() as a wrapper around it for calling it from the outside. Make tick_nohz_idle_enter() only call tick_nohz_start_idle() instead of calling the entire __tick_nohz_idle_enter(), add another wrapper disabling and enabling interrupts around tick_nohz_idle_stop_tick() and make the current callers of tick_nohz_idle_enter() call it too to retain their current functionality. Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com> Reviewed-by: Frederic Weisbecker <frederic@kernel.org> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-rw-r--r--arch/x86/xen/smp_pv.c1
-rw-r--r--include/linux/tick.h12
-rw-r--r--kernel/sched/idle.c1
-rw-r--r--kernel/time/tick-sched.c46
4 files changed, 39 insertions, 21 deletions
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index c0c756c76afe..2e20ae2fa2d6 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -425,6 +425,7 @@ static void xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */
425 * data back is to call: 425 * data back is to call:
426 */ 426 */
427 tick_nohz_idle_enter(); 427 tick_nohz_idle_enter();
428 tick_nohz_idle_stop_tick_protected();
428 429
429 cpuhp_online_idle(CPUHP_AP_ONLINE_IDLE); 430 cpuhp_online_idle(CPUHP_AP_ONLINE_IDLE);
430} 431}
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 7f8c9a127f5a..1d253df9ea3c 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -115,6 +115,7 @@ enum tick_dep_bits {
115extern bool tick_nohz_enabled; 115extern bool tick_nohz_enabled;
116extern bool tick_nohz_tick_stopped(void); 116extern bool tick_nohz_tick_stopped(void);
117extern bool tick_nohz_tick_stopped_cpu(int cpu); 117extern bool tick_nohz_tick_stopped_cpu(int cpu);
118extern void tick_nohz_idle_stop_tick(void);
118extern void tick_nohz_idle_enter(void); 119extern void tick_nohz_idle_enter(void);
119extern void tick_nohz_idle_exit(void); 120extern void tick_nohz_idle_exit(void);
120extern void tick_nohz_irq_exit(void); 121extern void tick_nohz_irq_exit(void);
@@ -123,10 +124,19 @@ extern unsigned long tick_nohz_get_idle_calls(void);
123extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu); 124extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
124extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); 125extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
125extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); 126extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
127
128static inline void tick_nohz_idle_stop_tick_protected(void)
129{
130 local_irq_disable();
131 tick_nohz_idle_stop_tick();
132 local_irq_enable();
133}
134
126#else /* !CONFIG_NO_HZ_COMMON */ 135#else /* !CONFIG_NO_HZ_COMMON */
127#define tick_nohz_enabled (0) 136#define tick_nohz_enabled (0)
128static inline int tick_nohz_tick_stopped(void) { return 0; } 137static inline int tick_nohz_tick_stopped(void) { return 0; }
129static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; } 138static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; }
139static inline void tick_nohz_idle_stop_tick(void) { }
130static inline void tick_nohz_idle_enter(void) { } 140static inline void tick_nohz_idle_enter(void) { }
131static inline void tick_nohz_idle_exit(void) { } 141static inline void tick_nohz_idle_exit(void) { }
132 142
@@ -136,6 +146,8 @@ static inline ktime_t tick_nohz_get_sleep_length(void)
136} 146}
137static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } 147static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
138static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } 148static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
149
150static inline void tick_nohz_idle_stop_tick_protected(void) { }
139#endif /* !CONFIG_NO_HZ_COMMON */ 151#endif /* !CONFIG_NO_HZ_COMMON */
140 152
141#ifdef CONFIG_NO_HZ_FULL 153#ifdef CONFIG_NO_HZ_FULL
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 2975f195e1c4..c0bc111878e6 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -216,6 +216,7 @@ static void do_idle(void)
216 216
217 __current_set_polling(); 217 __current_set_polling();
218 tick_nohz_idle_enter(); 218 tick_nohz_idle_enter();
219 tick_nohz_idle_stop_tick_protected();
219 220
220 while (!need_resched()) { 221 while (!need_resched()) {
221 check_pgt_cache(); 222 check_pgt_cache();
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 5d4a0342f934..678349aec483 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -528,14 +528,11 @@ static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
528 sched_clock_idle_wakeup_event(); 528 sched_clock_idle_wakeup_event();
529} 529}
530 530
531static ktime_t tick_nohz_start_idle(struct tick_sched *ts) 531static void tick_nohz_start_idle(struct tick_sched *ts)
532{ 532{
533 ktime_t now = ktime_get(); 533 ts->idle_entrytime = ktime_get();
534
535 ts->idle_entrytime = now;
536 ts->idle_active = 1; 534 ts->idle_active = 1;
537 sched_clock_idle_sleep_event(); 535 sched_clock_idle_sleep_event();
538 return now;
539} 536}
540 537
541/** 538/**
@@ -894,19 +891,21 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
894 return true; 891 return true;
895} 892}
896 893
897static void __tick_nohz_idle_enter(struct tick_sched *ts) 894static void __tick_nohz_idle_stop_tick(struct tick_sched *ts)
898{ 895{
899 ktime_t now, expires; 896 ktime_t expires;
900 int cpu = smp_processor_id(); 897 int cpu = smp_processor_id();
901 898
902 now = tick_nohz_start_idle(ts);
903
904 if (can_stop_idle_tick(cpu, ts)) { 899 if (can_stop_idle_tick(cpu, ts)) {
905 int was_stopped = ts->tick_stopped; 900 int was_stopped = ts->tick_stopped;
906 901
907 ts->idle_calls++; 902 ts->idle_calls++;
908 903
909 expires = tick_nohz_stop_sched_tick(ts, now, cpu); 904 /*
905 * The idle entry time should be a sufficient approximation of
906 * the current time at this point.
907 */
908 expires = tick_nohz_stop_sched_tick(ts, ts->idle_entrytime, cpu);
910 if (expires > 0LL) { 909 if (expires > 0LL) {
911 ts->idle_sleeps++; 910 ts->idle_sleeps++;
912 ts->idle_expires = expires; 911 ts->idle_expires = expires;
@@ -920,16 +919,19 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
920} 919}
921 920
922/** 921/**
923 * tick_nohz_idle_enter - stop the idle tick from the idle task 922 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
924 * 923 *
925 * When the next event is more than a tick into the future, stop the idle tick 924 * When the next event is more than a tick into the future, stop the idle tick
926 * Called when we start the idle loop. 925 */
927 * 926void tick_nohz_idle_stop_tick(void)
928 * The arch is responsible of calling: 927{
928 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched));
929}
930
931/**
932 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
929 * 933 *
930 * - rcu_idle_enter() after its last use of RCU before the CPU is put 934 * Called when we start the idle loop.
931 * to sleep.
932 * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
933 */ 935 */
934void tick_nohz_idle_enter(void) 936void tick_nohz_idle_enter(void)
935{ 937{
@@ -941,7 +943,7 @@ void tick_nohz_idle_enter(void)
941 943
942 ts = this_cpu_ptr(&tick_cpu_sched); 944 ts = this_cpu_ptr(&tick_cpu_sched);
943 ts->inidle = 1; 945 ts->inidle = 1;
944 __tick_nohz_idle_enter(ts); 946 tick_nohz_start_idle(ts);
945 947
946 local_irq_enable(); 948 local_irq_enable();
947} 949}
@@ -958,10 +960,12 @@ void tick_nohz_irq_exit(void)
958{ 960{
959 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); 961 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
960 962
961 if (ts->inidle) 963 if (ts->inidle) {
962 __tick_nohz_idle_enter(ts); 964 tick_nohz_start_idle(ts);
963 else 965 __tick_nohz_idle_stop_tick(ts);
966 } else {
964 tick_nohz_full_update_tick(ts); 967 tick_nohz_full_update_tick(ts);
968 }
965} 969}
966 970
967/** 971/**