Diffstat (limited to 'kernel/sched/cputime.c')
-rw-r--r--	kernel/sched/cputime.c	41
1 file changed, 33 insertions, 8 deletions
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 1934f658c036..a846cf89eb96 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -263,6 +263,11 @@ void account_idle_time(cputime_t cputime)
 	cpustat[CPUTIME_IDLE] += (__force u64) cputime;
 }
 
+/*
+ * When a guest is interrupted for a longer amount of time, missed clock
+ * ticks are not redelivered later. Due to that, this function may on
+ * occasion account more time than the calling functions think elapsed.
+ */
 static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
 {
 #ifdef CONFIG_PARAVIRT
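The comment added here documents the contract the rest of the patch relies on: steal_account_process_time() may have more steal time to hand out than its caller thinks elapsed, and the maxtime argument caps how much of it a single call consumes. A minimal userspace sketch of that clamping idea (steal_sketch and pending are hypothetical names; the real function reads paravirt_steal_clock() under CONFIG_PARAVIRT and only advances its bookkeeping by the clamped amount, so the remainder is picked up by a later call):

#include <limits.h>
#include <stdio.h>

typedef unsigned long cputime_t;	/* assumption: unsigned, jiffies-based */

/*
 * Hypothetical model: "pending" is steal time the hypervisor has reported
 * but that has not been accounted yet. Each call consumes at most maxtime
 * of it; the remainder stays pending for a later call.
 */
static cputime_t steal_sketch(cputime_t *pending, cputime_t maxtime)
{
	cputime_t steal = *pending < maxtime ? *pending : maxtime;

	*pending -= steal;
	return steal;
}

int main(void)
{
	cputime_t pending = 7;

	printf("%lu\n", steal_sketch(&pending, 1));		/* 1: one tick's worth */
	printf("%lu\n", steal_sketch(&pending, ULONG_MAX));	/* 6: everything left */
	return 0;
}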
@@ -371,7 +376,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
 	 * idle, or potentially user or system time. Due to rounding,
 	 * other time can exceed ticks occasionally.
 	 */
-	other = account_other_time(cputime);
+	other = account_other_time(ULONG_MAX);
 	if (other >= cputime)
 		return;
 	cputime -= other;
@@ -486,7 +491,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 	}
 
 	cputime = cputime_one_jiffy;
-	steal = steal_account_process_time(cputime);
+	steal = steal_account_process_time(ULONG_MAX);
 
 	if (steal >= cputime)
 		return;
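Together with the previous hunk, this moves the limiting from the callee to the caller: both tick paths now ask for everything that is pending (ULONG_MAX) and rely on the existing "if (other/steal >= cputime) return;" guard to keep one tick's accounting bounded. A sketch of that guard pattern, with account_other_time() stubbed out since its body is not part of this diff (the stub's behavior, combined irq plus steal time clamped to the limit, is an assumption, and its constant is invented for the example):

#include <limits.h>
#include <stdio.h>

typedef unsigned long cputime_t;

/* Stand-in for account_other_time(): assumed to return combined irq and
 * steal time, clamped to the given limit. */
static cputime_t other_time_stub(cputime_t limit)
{
	cputime_t pending_other = 3;	/* pretend 3 jiffies of irq/steal time */

	return pending_other < limit ? pending_other : limit;
}

int main(void)
{
	cputime_t cputime = 1;	/* one tick to distribute */
	cputime_t other = other_time_stub(ULONG_MAX);

	/* The caller-side guard from the patch: if irq/steal time covered
	 * the whole tick (or more), nothing is left for user/system. */
	if (other >= cputime) {
		puts("whole tick consumed by irq/steal time");
		return 0;
	}
	cputime -= other;
	printf("remaining for user/system: %lu\n", cputime);
	return 0;
}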
@@ -508,13 +513,21 @@ void account_process_tick(struct task_struct *p, int user_tick)
  */
 void account_idle_ticks(unsigned long ticks)
 {
+	cputime_t cputime, steal;
 
 	if (sched_clock_irqtime) {
 		irqtime_account_idle_ticks(ticks);
 		return;
 	}
 
-	account_idle_time(jiffies_to_cputime(ticks));
+	cputime = jiffies_to_cputime(ticks);
+	steal = steal_account_process_time(ULONG_MAX);
+
+	if (steal >= cputime)
+		return;
+
+	cputime -= steal;
+	account_idle_time(cputime);
 }
 
 /*
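account_idle_ticks() covers the case where a whole batch of idle ticks is accounted at once, so it now performs the same steal subtraction as the per-tick path above: ticks the host stole are not idle time. A small numeric illustration of the new logic (function name and values invented for the example, assuming one jiffy maps to one cputime unit):

#include <stdio.h>

typedef unsigned long cputime_t;

/* Mirrors the patched logic: subtract stolen time from the idle batch,
 * and account nothing as idle if the host stole all of it. */
static void idle_ticks_sketch(cputime_t ticks, cputime_t steal)
{
	cputime_t cputime = ticks;

	if (steal >= cputime) {
		puts("all ticks stolen, nothing accounted as idle");
		return;
	}
	cputime -= steal;
	printf("idle: %lu, steal: %lu\n", cputime, steal);
}

int main(void)
{
	idle_ticks_sketch(10, 4);	/* idle: 6, steal: 4 */
	idle_ticks_sketch(3, 5);	/* all stolen */
	return 0;
}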
@@ -606,19 +619,25 @@ static void cputime_adjust(struct task_cputime *curr,
 	stime = curr->stime;
 	utime = curr->utime;
 
-	if (utime == 0) {
-		stime = rtime;
+	/*
+	 * If either stime or both stime and utime are 0, assume all runtime is
+	 * userspace. Once a task gets some ticks, the monotonicity code at
+	 * 'update' will ensure things converge to the observed ratio.
+	 */
+	if (stime == 0) {
+		utime = rtime;
 		goto update;
 	}
 
-	if (stime == 0) {
-		utime = rtime;
+	if (utime == 0) {
+		stime = rtime;
 		goto update;
 	}
 
 	stime = scale_stime((__force u64)stime, (__force u64)rtime,
 			    (__force u64)(stime + utime));
 
+update:
 	/*
 	 * Make sure stime doesn't go backwards; this preserves monotonicity
 	 * for utime because rtime is monotonic.
@@ -641,7 +660,6 @@ static void cputime_adjust(struct task_cputime *curr,
 		stime = rtime - utime;
 	}
 
-update:
 	prev->stime = stime;
 	prev->utime = utime;
 out:
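These two hunks reorder the zero checks in cputime_adjust() so that a task with stime == 0 (including the stime == utime == 0 case) has all of rtime attributed to userspace, and they move the update: label so that both early exits flow through the monotonicity clamp below it. In the common case, scale_stime() splits the precise rtime in the sampled stime:utime ratio; a minimal userspace sketch of that split, leaving out the 64-bit overflow handling the kernel helper needs:

#include <inttypes.h>
#include <stdio.h>

/* Proportional split as in cputime_adjust(): give stime the same share
 * of the precise rtime that it had of the sampled stime + utime total.
 * Plain division is fine for an illustration. */
static uint64_t scale_stime_sketch(uint64_t stime, uint64_t rtime,
				   uint64_t total)
{
	return stime * rtime / total;
}

int main(void)
{
	uint64_t stime = 2, utime = 6, rtime = 10;
	uint64_t s = scale_stime_sketch(stime, rtime, stime + utime);

	/* The 2:6 sampled ratio applied to rtime = 10 gives stime 2 and
	 * utime 8 (integer division rounds 2.5 down to 2). */
	printf("stime=%" PRIu64 " utime=%" PRIu64 "\n", s, rtime - s);
	return 0;
}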
@@ -686,6 +704,13 @@ static cputime_t get_vtime_delta(struct task_struct *tsk)
 	unsigned long now = READ_ONCE(jiffies);
 	cputime_t delta, other;
 
+	/*
+	 * Unlike tick based timing, vtime based timing never has lost
+	 * ticks, and no need for steal time accounting to make up for
+	 * lost ticks. Vtime accounts a rounded version of actual
+	 * elapsed time. Limit account_other_time to prevent rounding
+	 * errors from causing elapsed vtime to go negative.
+	 */
 	delta = jiffies_to_cputime(now - tsk->vtime_snap);
 	other = account_other_time(delta);
 	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
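The vtime path, by contrast, keeps passing a bounded limit: delta here is real elapsed time, and the accounted other time is subtracted from it further down (below the hunk shown). Since cputime_t is unsigned, letting account_other_time() return more than delta would make that subtraction wrap rather than go negative, which is the rounding hazard the new comment describes. A two-line demonstration of the failure mode the clamp prevents (assuming cputime_t is an unsigned long):

#include <stdio.h>

typedef unsigned long cputime_t;	/* assumption: unsigned arithmetic */

int main(void)
{
	cputime_t delta = 3, other = 5;

	/* Without clamping other to delta, rounding could make it larger,
	 * and the unsigned subtraction wraps to a huge value instead of
	 * going negative: */
	printf("%lu\n", delta - other);	/* 18446744073709551614 on LP64 */
	return 0;
}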