Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/tick-sched.c  77
1 file changed, 42 insertions(+), 35 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index da70c6db496c..81409bba2425 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -271,10 +271,10 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 }
 EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 
-static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
+static void tick_nohz_stop_sched_tick(struct tick_sched *ts, ktime_t now)
 {
 	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
-	ktime_t last_update, expires, now;
+	ktime_t last_update, expires;
 	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
 	u64 time_delta;
 	int cpu;
@@ -282,8 +282,6 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
 	cpu = smp_processor_id();
 	ts = &per_cpu(tick_cpu_sched, cpu);
 
-	now = tick_nohz_start_idle(cpu, ts);
-
 	/*
 	 * If this cpu is offline and it is the one which updates
 	 * jiffies, then give up the assignment and let it be taken by
@@ -444,6 +442,14 @@ out:
 	ts->sleep_length = ktime_sub(dev->next_event, now);
 }
 
+static void __tick_nohz_idle_enter(struct tick_sched *ts)
+{
+	ktime_t now;
+
+	now = tick_nohz_start_idle(smp_processor_id(), ts);
+	tick_nohz_stop_sched_tick(ts, now);
+}
+
 /**
  * tick_nohz_idle_enter - stop the idle tick from the idle task
  *
@@ -479,7 +485,7 @@ void tick_nohz_idle_enter(void)
 	 * update of the idle time accounting in tick_nohz_start_idle().
 	 */
 	ts->inidle = 1;
-	tick_nohz_stop_sched_tick(ts);
+	__tick_nohz_idle_enter(ts);
 
 	local_irq_enable();
 }
@@ -499,7 +505,7 @@ void tick_nohz_irq_exit(void)
 	if (!ts->inidle)
 		return;
 
-	tick_nohz_stop_sched_tick(ts);
+	__tick_nohz_idle_enter(ts);
 }
 
 /**
@@ -540,39 +546,11 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 	}
 }
 
-/**
- * tick_nohz_idle_exit - restart the idle tick from the idle task
- *
- * Restart the idle tick when the CPU is woken up from idle
- * This also exit the RCU extended quiescent state. The CPU
- * can use RCU again after this function is called.
- */
-void tick_nohz_idle_exit(void)
+static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 {
-	int cpu = smp_processor_id();
-	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 	unsigned long ticks;
 #endif
-	ktime_t now;
-
-	local_irq_disable();
-
-	WARN_ON_ONCE(!ts->inidle);
-
-	ts->inidle = 0;
-
-	if (ts->idle_active || ts->tick_stopped)
-		now = ktime_get();
-
-	if (ts->idle_active)
-		tick_nohz_stop_idle(cpu, now);
-
-	if (!ts->tick_stopped) {
-		local_irq_enable();
-		return;
-	}
-
 	/* Update jiffies first */
 	select_nohz_load_balancer(0);
 	tick_do_update_jiffies64(now);
@@ -600,6 +578,35 @@ void tick_nohz_idle_exit(void)
 	ts->idle_exittime = now;
 
 	tick_nohz_restart(ts, now);
+}
+
+/**
+ * tick_nohz_idle_exit - restart the idle tick from the idle task
+ *
+ * Restart the idle tick when the CPU is woken up from idle
+ * This also exit the RCU extended quiescent state. The CPU
+ * can use RCU again after this function is called.
+ */
+void tick_nohz_idle_exit(void)
+{
+	int cpu = smp_processor_id();
+	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+	ktime_t now;
+
+	local_irq_disable();
+
+	WARN_ON_ONCE(!ts->inidle);
+
+	ts->inidle = 0;
+
+	if (ts->idle_active || ts->tick_stopped)
+		now = ktime_get();
+
+	if (ts->idle_active)
+		tick_nohz_stop_idle(cpu, now);
+
+	if (ts->tick_stopped)
+		tick_nohz_restart_sched_tick(ts, now);
 
 	local_irq_enable();
 }
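
For reference, here is a minimal userspace sketch of the call structure this patch introduces: idle entry funnels through __tick_nohz_idle_enter(), which takes one timestamp and hands it to tick_nohz_stop_sched_tick(), while tick_nohz_idle_exit() delegates the restart work to the new tick_nohz_restart_sched_tick() helper. This is a mock, not the kernel code: the struct fields, the ktime_get() stand-in, and the helper bodies are placeholders, and only the function names and the order of calls mirror the diff above.

/*
 * Userspace sketch of the refactored nohz idle entry/exit flow.
 * All bodies are simplified stand-ins; only the call structure
 * follows the patch above.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

typedef long long ktime_t;		/* stand-in for the kernel type */

struct tick_sched {
	bool inidle, idle_active, tick_stopped;
	ktime_t idle_entrytime, idle_exittime;
};

static ktime_t ktime_get(void)
{
	struct timespec t;

	clock_gettime(CLOCK_MONOTONIC, &t);
	return (ktime_t)t.tv_sec * 1000000000LL + t.tv_nsec;
}

/* tick_nohz_start_idle(): timestamp idle entry and return "now". */
static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
{
	ts->idle_entrytime = ktime_get();
	ts->idle_active = true;
	return ts->idle_entrytime;
}

/* tick_nohz_stop_sched_tick() now receives "now" instead of sampling it. */
static void tick_nohz_stop_sched_tick(struct tick_sched *ts, ktime_t now)
{
	(void)now;
	ts->tick_stopped = true;
}

/* __tick_nohz_idle_enter(): one timestamp feeds both steps. */
static void __tick_nohz_idle_enter(struct tick_sched *ts)
{
	ktime_t now = tick_nohz_start_idle(ts);

	tick_nohz_stop_sched_tick(ts, now);
}

static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
	(void)now;
	ts->idle_active = false;
}

/* The restart work split out of tick_nohz_idle_exit(). */
static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
	ts->idle_exittime = now;
	ts->tick_stopped = false;
}

/* The slimmed-down exit path: stop idle accounting, then restart the tick. */
static void tick_nohz_idle_exit(struct tick_sched *ts)
{
	ktime_t now = 0;

	ts->inidle = false;
	if (ts->idle_active || ts->tick_stopped)
		now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(ts, now);
	if (ts->tick_stopped)
		tick_nohz_restart_sched_tick(ts, now);
}

int main(void)
{
	struct tick_sched ts = { 0 };

	ts.inidle = true;
	__tick_nohz_idle_enter(&ts);	/* idle entry path */
	tick_nohz_idle_exit(&ts);	/* idle exit path */
	printf("idle window: %lld ns\n", ts.idle_exittime - ts.idle_entrytime);
	return 0;
}

The point of the split is visible in the sketch: both tick_nohz_idle_enter() and tick_nohz_irq_exit() can share __tick_nohz_idle_enter(), and the exit path only performs the tick restart when the tick was actually stopped, with the restart logic isolated in its own helper.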