author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2009-09-29 08:25:15 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2009-11-05 01:53:53 -0500
commit	eed3b9cf3fe3fcc7a50238dfcab63a63914e8f42 (patch)
tree	b8db321400bc9a17e7cfefb82dbfb515a841fab2 /kernel
parent	7bc7d637452383d56ba4368d4336b0dde1bb476d (diff)
nohz: Reuse ktime in sub-functions of tick_check_idle.
On a system with NOHZ=y, tick_check_idle calls tick_nohz_stop_idle and
tick_nohz_update_jiffies. Given the right conditions (ts->idle_active
and/or ts->tick_stopped) both functions get a time stamp with
ktime_get. The same time stamp can be reused if both functions require
one.

On s390 this change has the additional benefit that gcc inlines
tick_nohz_stop_idle into tick_check_idle. The number of instructions
needed to execute tick_check_idle drops from 225 to 144 (without the
ktime_get optimization it is 367 vs. 215 instructions).

before:

 0)               |  tick_check_idle() {
 0)               |    tick_nohz_stop_idle() {
 0)               |      ktime_get() {
 0)               |        read_tod_clock() {
 0)   0.601 us    |        }
 0)   1.765 us    |      }
 0)   3.047 us    |    }
 0)               |    ktime_get() {
 0)               |      read_tod_clock() {
 0)   0.570 us    |      }
 0)   1.727 us    |    }
 0)               |    tick_do_update_jiffies64() {
 0)   0.609 us    |    }
 0)   8.055 us    |  }

after:

 0)               |  tick_check_idle() {
 0)               |    ktime_get() {
 0)               |      read_tod_clock() {
 0)   0.617 us    |      }
 0)   1.773 us    |    }
 0)               |    tick_do_update_jiffies64() {
 0)   0.593 us    |    }
 0)   4.477 us    |  }

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: john stultz <johnstul@us.ibm.com>
LKML-Reference: <20090929122533.206589318@de.ibm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/time/tick-sched.c	62
1 file changed, 33 insertions(+), 29 deletions(-)
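
The pattern is easy to demonstrate outside the kernel. The following
standalone C sketch mirrors the structure of the tick_check_nohz helper
introduced below; the names (sample_clock, stop_idle, update_jiffies,
check_idle) and the flattened state variables are illustrative
stand-ins, not kernel symbols. The caller tests the conditions first,
samples the clock at most once, and hands the same time stamp to every
helper:

/*
 * Sketch only: the caller samples the clock once and passes the value
 * down, instead of each helper calling the clock source itself.
 */
#include <stdint.h>
#include <time.h>

typedef int64_t ktime_ns;

/* Stand-in for ktime_get(): one potentially expensive clock read. */
static ktime_ns sample_clock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (ktime_ns)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Flattened stand-ins for the ts->idle_active / ts->tick_stopped state. */
static int idle_active;
static int tick_stopped;
static ktime_ns idle_entrytime;
static ktime_ns idle_sleeptime;

/*
 * The condition check moved from the helper into the caller, so the
 * helper body is unconditional and trivially inlinable.
 */
static void stop_idle(ktime_ns now)
{
	idle_sleeptime += now - idle_entrytime;
	idle_active = 0;
}

static void update_jiffies(ktime_ns now)
{
	(void)now;	/* would fold `now` into the jiffies accounting */
}

static void check_idle(void)
{
	ktime_ns now;

	if (!idle_active && !tick_stopped)
		return;			/* no clock read needed at all */
	now = sample_clock();		/* single shared time stamp */
	if (idle_active)
		stop_idle(now);
	if (tick_stopped)
		update_jiffies(now);
}

int main(void)
{
	idle_active = 1;
	idle_entrytime = sample_clock();
	check_idle();	/* ends the idle period with one clock read */
	return 0;
}

Because stop_idle and update_jiffies no longer contain their own
condition checks or clock reads, they become small enough for the
compiler to inline into the caller, which is where the s390
instruction-count win described in the commit message comes from.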
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e0f59a21c061..7378e2c71ca6 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -134,18 +134,13 @@ __setup("nohz=", setup_tick_nohz);
  * value. We do this unconditionally on any cpu, as we don't know whether the
  * cpu, which has the update task assigned is in a long sleep.
  */
-static void tick_nohz_update_jiffies(void)
+static void tick_nohz_update_jiffies(ktime_t now)
 {
 	int cpu = smp_processor_id();
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 	unsigned long flags;
-	ktime_t now;
-
-	if (!ts->tick_stopped)
-		return;
 
 	cpumask_clear_cpu(cpu, nohz_cpu_mask);
-	now = ktime_get();
 	ts->idle_waketime = now;
 
 	local_irq_save(flags);
@@ -155,20 +150,17 @@ static void tick_nohz_update_jiffies(void)
 	touch_softlockup_watchdog();
 }
 
-static void tick_nohz_stop_idle(int cpu)
+static void tick_nohz_stop_idle(int cpu, ktime_t now)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+	ktime_t delta;
 
-	if (ts->idle_active) {
-		ktime_t now, delta;
-		now = ktime_get();
-		delta = ktime_sub(now, ts->idle_entrytime);
-		ts->idle_lastupdate = now;
-		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
-		ts->idle_active = 0;
+	delta = ktime_sub(now, ts->idle_entrytime);
+	ts->idle_lastupdate = now;
+	ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+	ts->idle_active = 0;
 
-		sched_clock_idle_wakeup_event(0);
-	}
+	sched_clock_idle_wakeup_event(0);
 }
 
 static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
@@ -431,7 +423,11 @@ void tick_nohz_restart_sched_tick(void)
 	ktime_t now;
 
 	local_irq_disable();
-	tick_nohz_stop_idle(cpu);
+	if (ts->idle_active || (ts->inidle && ts->tick_stopped))
+		now = ktime_get();
+
+	if (ts->idle_active)
+		tick_nohz_stop_idle(cpu, now);
 
 	if (!ts->inidle || !ts->tick_stopped) {
 		ts->inidle = 0;
@@ -445,7 +441,6 @@ void tick_nohz_restart_sched_tick(void)
 
 	/* Update jiffies first */
 	select_nohz_load_balancer(0);
-	now = ktime_get();
 	tick_do_update_jiffies64(now);
 	cpumask_clear_cpu(cpu, nohz_cpu_mask);
 
@@ -579,22 +574,18 @@ static void tick_nohz_switch_to_nohz(void)
  * timer and do not touch the other magic bits which need to be done
  * when idle is left.
  */
-static void tick_nohz_kick_tick(int cpu)
+static void tick_nohz_kick_tick(int cpu, ktime_t now)
 {
 #if 0
 	/* Switch back to 2.6.27 behaviour */
 
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-	ktime_t delta, now;
-
-	if (!ts->tick_stopped)
-		return;
+	ktime_t delta;
 
 	/*
 	 * Do not touch the tick device, when the next expiry is either
 	 * already reached or less/equal than the tick period.
 	 */
-	now = ktime_get();
 	delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
 	if (delta.tv64 <= tick_period.tv64)
 		return;
@@ -603,9 +594,26 @@ static void tick_nohz_kick_tick(int cpu)
 #endif
 }
 
+static inline void tick_check_nohz(int cpu)
+{
+	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+	ktime_t now;
+
+	if (!ts->idle_active && !ts->tick_stopped)
+		return;
+	now = ktime_get();
+	if (ts->idle_active)
+		tick_nohz_stop_idle(cpu, now);
+	if (ts->tick_stopped) {
+		tick_nohz_update_jiffies(now);
+		tick_nohz_kick_tick(cpu, now);
+	}
+}
+
 #else
 
 static inline void tick_nohz_switch_to_nohz(void) { }
+static inline void tick_check_nohz(int cpu) { }
 
 #endif /* NO_HZ */
 
@@ -615,11 +623,7 @@ static inline void tick_nohz_switch_to_nohz(void) { }
 void tick_check_idle(int cpu)
 {
 	tick_check_oneshot_broadcast(cpu);
-#ifdef CONFIG_NO_HZ
-	tick_nohz_stop_idle(cpu);
-	tick_nohz_update_jiffies();
-	tick_nohz_kick_tick(cpu);
-#endif
+	tick_check_nohz(cpu);
 }
 
 /*