Diffstat (limited to 'kernel/time/tick-sched.c')

 -rw-r--r--  kernel/time/tick-sched.c | 89 ++++++++++++++++++++++++-----------
 1 file changed, 56 insertions(+), 33 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index cb89fa8db110..63f24b550695 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -9,7 +9,7 @@
  *
  *  Started by: Thomas Gleixner and Ingo Molnar
  *
- *  For licencing details see kernel-base/COPYING
+ *  Distribute under GPLv2.
  */
 #include <linux/cpu.h>
 #include <linux/err.h>
@@ -143,6 +143,44 @@ void tick_nohz_update_jiffies(void)
 	local_irq_restore(flags);
 }
 
+void tick_nohz_stop_idle(int cpu)
+{
+	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+
+	if (ts->idle_active) {
+		ktime_t now, delta;
+		now = ktime_get();
+		delta = ktime_sub(now, ts->idle_entrytime);
+		ts->idle_lastupdate = now;
+		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+		ts->idle_active = 0;
+	}
+}
+
+static ktime_t tick_nohz_start_idle(int cpu)
+{
+	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+	ktime_t now, delta;
+
+	now = ktime_get();
+	if (ts->idle_active) {
+		delta = ktime_sub(now, ts->idle_entrytime);
+		ts->idle_lastupdate = now;
+		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+	}
+	ts->idle_entrytime = now;
+	ts->idle_active = 1;
+	return now;
+}
+
+u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
+{
+	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+
+	*last_update_time = ktime_to_us(ts->idle_lastupdate);
+	return ktime_to_us(ts->idle_sleeptime);
+}
+
 /**
  * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
  *
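The hunk above pulls the idle-time bookkeeping out of tick_nohz_stop_sched_tick() into the tick_nohz_start_idle()/tick_nohz_stop_idle() pair and exposes the accumulated totals through get_cpu_idle_time_us(). A minimal sketch of how a consumer (for instance a cpufreq-style governor) might sample the new interface; the helper name and the prev_* state below are illustrative, not part of this patch:

	#include <linux/tick.h>	/* declares get_cpu_idle_time_us() */

	/* hypothetical per-governor sampling state (illustrative only) */
	static u64 prev_idle_us, prev_wall_us;

	/*
	 * Sketch: microseconds this CPU was busy (non-idle) since the
	 * previous sample.
	 */
	static u64 busy_us_since_last_sample(int cpu)
	{
		u64 wall_us, idle_us, idle_delta, wall_delta;

		/* cumulative idle time; wall_us gets the last update stamp */
		idle_us = get_cpu_idle_time_us(cpu, &wall_us);

		idle_delta = idle_us - prev_idle_us;
		wall_delta = wall_us - prev_wall_us;
		prev_idle_us = idle_us;
		prev_wall_us = wall_us;

		return wall_delta - idle_delta;
	}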
@@ -153,14 +191,16 @@ void tick_nohz_update_jiffies(void)
 void tick_nohz_stop_sched_tick(void)
 {
 	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
+	unsigned long rt_jiffies;
 	struct tick_sched *ts;
-	ktime_t last_update, expires, now, delta;
+	ktime_t last_update, expires, now;
 	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
 	int cpu;
 
 	local_irq_save(flags);
 
 	cpu = smp_processor_id();
+	now = tick_nohz_start_idle(cpu);
 	ts = &per_cpu(tick_cpu_sched, cpu);
 
 	/*
@@ -192,19 +232,7 @@ void tick_nohz_stop_sched_tick(void)
 		}
 	}
 
-	now = ktime_get();
-	/*
-	 * When called from irq_exit we need to account the idle sleep time
-	 * correctly.
-	 */
-	if (ts->tick_stopped) {
-		delta = ktime_sub(now, ts->idle_entrytime);
-		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
-	}
-
-	ts->idle_entrytime = now;
 	ts->idle_calls++;
-
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
 		seq = read_seqbegin(&xtime_lock);
@@ -216,6 +244,10 @@ void tick_nohz_stop_sched_tick(void)
 	next_jiffies = get_next_timer_interrupt(last_jiffies);
 	delta_jiffies = next_jiffies - last_jiffies;
 
+	rt_jiffies = rt_needs_cpu(cpu);
+	if (rt_jiffies && rt_jiffies < delta_jiffies)
+		delta_jiffies = rt_jiffies;
+
 	if (rcu_needs_cpu(cpu))
 		delta_jiffies = 1;
 	/*
@@ -291,7 +323,7 @@ void tick_nohz_stop_sched_tick(void)
 			/* Check, if the timer was already in the past */
 			if (hrtimer_active(&ts->sched_timer))
 				goto out;
-		} else if(!tick_program_event(expires, 0))
+		} else if (!tick_program_event(expires, 0))
 				goto out;
 		/*
 		 * We are past the event already. So we crossed a
@@ -332,23 +364,22 @@ void tick_nohz_restart_sched_tick(void)
 	int cpu = smp_processor_id();
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 	unsigned long ticks;
-	ktime_t now, delta;
+	ktime_t now;
 
-	if (!ts->tick_stopped)
+	local_irq_disable();
+	tick_nohz_stop_idle(cpu);
+
+	if (!ts->tick_stopped) {
+		local_irq_enable();
 		return;
+	}
 
 	/* Update jiffies first */
-	now = ktime_get();
-
-	local_irq_disable();
 	select_nohz_load_balancer(0);
+	now = ktime_get();
 	tick_do_update_jiffies64(now);
 	cpu_clear(cpu, nohz_cpu_mask);
 
-	/* Account the idle time */
-	delta = ktime_sub(now, ts->idle_entrytime);
-	ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
-
 	/*
 	 * We stopped the tick in idle. Update process times would miss the
 	 * time we slept as update_process_times does only a 1 tick
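For context on the reordering in tick_nohz_restart_sched_tick(): both entry points are driven from the arch idle loop, which brackets the low-power wait roughly as sketched below (modelled on cpu_idle() loops of this era; names and details vary per architecture and are illustrative, not part of this patch):

	static void idle_loop_sketch(void)
	{
		while (1) {
			/* stops the tick and, after this patch, opens the
			 * idle-accounting window via tick_nohz_start_idle() */
			tick_nohz_stop_sched_tick();
			while (!need_resched())
				default_idle();	/* arch-specific low-power wait */
			/* closes the window via tick_nohz_stop_idle(), then
			 * restarts the periodic tick */
			tick_nohz_restart_sched_tick();
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}
	}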
@@ -502,14 +533,13 @@ static inline void tick_nohz_switch_to_nohz(void) { }
  */
 #ifdef CONFIG_HIGH_RES_TIMERS
 /*
- * We rearm the timer until we get disabled by the idle code
+ * We rearm the timer until we get disabled by the idle code.
  * Called with interrupts disabled and timer->base->cpu_base->lock held.
  */
 static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 {
 	struct tick_sched *ts =
 		container_of(timer, struct tick_sched, sched_timer);
-	struct hrtimer_cpu_base *base = timer->base->cpu_base;
 	struct pt_regs *regs = get_irq_regs();
 	ktime_t now = ktime_get();
 	int cpu = smp_processor_id();
@@ -547,15 +577,8 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 			touch_softlockup_watchdog();
 			ts->idle_jiffies++;
 		}
-		/*
-		 * update_process_times() might take tasklist_lock, hence
-		 * drop the base lock. sched-tick hrtimers are per-CPU and
-		 * never accessible by userspace APIs, so this is safe to do.
-		 */
-		spin_unlock(&base->lock);
 		update_process_times(user_mode(regs));
 		profile_tick(CPU_PROFILING);
-		spin_lock(&base->lock);
 	}
 
 	/* Do not restart, when we are in the idle loop */
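The final hunk drops the unlock/relock of the hrtimer base lock around update_process_times(), so the callback body runs straight through. The function keeps the usual self-rearming hrtimer shape; a simplified sketch of that pattern (tick_period is the per-tick interval defined elsewhere in kernel/time/tick-sched.c; the callback name is illustrative, and tick_sched_timer itself additionally returns HRTIMER_NORESTART while the tick is stopped in idle):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static enum hrtimer_restart sketch_tick_cb(struct hrtimer *timer)
	{
		ktime_t now = ktime_get();

		/* ... per-tick work such as update_process_times() ... */

		/* advance the expiry by whole periods past 'now', re-arm */
		hrtimer_forward(timer, now, tick_period);
		return HRTIMER_RESTART;
	}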