Diffstat (limited to 'kernel/sched/cputime.c')

 -rw-r--r--  kernel/sched/cputime.c | 33 +++++++++++++++++++++++++--------

 1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 9858266fb0b3..a846cf89eb96 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -263,6 +263,11 @@ void account_idle_time(cputime_t cputime)
 	cpustat[CPUTIME_IDLE] += (__force u64) cputime;
 }
 
+/*
+ * When a guest is interrupted for a longer amount of time, missed clock
+ * ticks are not redelivered later. Due to that, this function may on
+ * occasion account more time than the calling functions think elapsed.
+ */
 static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
 {
 #ifdef CONFIG_PARAVIRT
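Note: the maxtime parameter is what makes the new comment matter. The paravirt
steal counter can run ahead of what a caller believes elapsed, so the result is
clamped before being accounted. A minimal sketch of that clamp, not the
verbatim kernel body (raw_steal is a hypothetical stand-in for the paravirt
steal-clock delta since the last accounting pass):

	static cputime_t steal_clamp_sketch(u64 raw_steal, cputime_t maxtime)
	{
		cputime_t steal = nsecs_to_cputime(raw_steal);

		/* Hand back at most what the caller is prepared to absorb. */
		return steal < maxtime ? steal : maxtime;
	}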
@@ -371,7 +376,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
 	 * idle, or potentially user or system time. Due to rounding,
 	 * other time can exceed ticks occasionally.
 	 */
-	other = account_other_time(cputime);
+	other = account_other_time(ULONG_MAX);
 	if (other >= cputime)
 		return;
 	cputime -= other;
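Note: the switch from account_other_time(cputime) to account_other_time(ULONG_MAX)
relies on the guard that already follows it. Since missed ticks are never
redelivered (see the comment added above steal_account_process_time()), capping
the accounted time at one tick would strand the surplus forever; passing
ULONG_MAX flushes it all now, and the caller still bails out if the whole tick
was consumed. The resulting pattern, condensed with illustrative comments:

	other = account_other_time(ULONG_MAX);	/* account everything observed */
	if (other >= cputime)			/* tick fully consumed elsewhere */
		return;
	cputime -= other;			/* charge only the remainder */

The same reasoning covers the two steal_account_process_time(ULONG_MAX) call
sites in the next two hunks.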
@@ -486,7 +491,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 	}
 
 	cputime = cputime_one_jiffy;
-	steal = steal_account_process_time(cputime);
+	steal = steal_account_process_time(ULONG_MAX);
 
 	if (steal >= cputime)
 		return;
@@ -516,7 +521,7 @@ void account_idle_ticks(unsigned long ticks)
 	}
 
 	cputime = jiffies_to_cputime(ticks);
-	steal = steal_account_process_time(cputime);
+	steal = steal_account_process_time(ULONG_MAX);
 
 	if (steal >= cputime)
 		return;
| @@ -614,19 +619,25 @@ static void cputime_adjust(struct task_cputime *curr, | |||
| 614 | stime = curr->stime; | 619 | stime = curr->stime; |
| 615 | utime = curr->utime; | 620 | utime = curr->utime; |
| 616 | 621 | ||
| 617 | if (utime == 0) { | 622 | /* |
| 618 | stime = rtime; | 623 | * If either stime or both stime and utime are 0, assume all runtime is |
| 624 | * userspace. Once a task gets some ticks, the monotonicy code at | ||
| 625 | * 'update' will ensure things converge to the observed ratio. | ||
| 626 | */ | ||
| 627 | if (stime == 0) { | ||
| 628 | utime = rtime; | ||
| 619 | goto update; | 629 | goto update; |
| 620 | } | 630 | } |
| 621 | 631 | ||
| 622 | if (stime == 0) { | 632 | if (utime == 0) { |
| 623 | utime = rtime; | 633 | stime = rtime; |
| 624 | goto update; | 634 | goto update; |
| 625 | } | 635 | } |
| 626 | 636 | ||
| 627 | stime = scale_stime((__force u64)stime, (__force u64)rtime, | 637 | stime = scale_stime((__force u64)stime, (__force u64)rtime, |
| 628 | (__force u64)(stime + utime)); | 638 | (__force u64)(stime + utime)); |
| 629 | 639 | ||
| 640 | update: | ||
| 630 | /* | 641 | /* |
| 631 | * Make sure stime doesn't go backwards; this preserves monotonicity | 642 | * Make sure stime doesn't go backwards; this preserves monotonicity |
| 632 | * for utime because rtime is monotonic. | 643 | * for utime because rtime is monotonic. |
| @@ -649,7 +660,6 @@ static void cputime_adjust(struct task_cputime *curr, | |||
| 649 | stime = rtime - utime; | 660 | stime = rtime - utime; |
| 650 | } | 661 | } |
| 651 | 662 | ||
| 652 | update: | ||
| 653 | prev->stime = stime; | 663 | prev->stime = stime; |
| 654 | prev->utime = utime; | 664 | prev->utime = utime; |
| 655 | out: | 665 | out: |
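Note: moving the update: label above the clamp means the stime == 0 and
utime == 0 shortcuts now pass through the monotonicity code too, which is what
lets the new comment promise convergence. The clamp itself, paraphrased from
the surrounding function (the context lines here only hint at it):

	if (stime < prev->stime)	/* never let stime regress... */
		stime = prev->stime;
	utime = rtime - stime;		/* ...utime stays monotonic via rtime */

	if (utime < prev->utime) {	/* analogous guard for utime */
		utime = prev->utime;
		stime = rtime - utime;
	}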
| @@ -694,6 +704,13 @@ static cputime_t get_vtime_delta(struct task_struct *tsk) | |||
| 694 | unsigned long now = READ_ONCE(jiffies); | 704 | unsigned long now = READ_ONCE(jiffies); |
| 695 | cputime_t delta, other; | 705 | cputime_t delta, other; |
| 696 | 706 | ||
| 707 | /* | ||
| 708 | * Unlike tick based timing, vtime based timing never has lost | ||
| 709 | * ticks, and no need for steal time accounting to make up for | ||
| 710 | * lost ticks. Vtime accounts a rounded version of actual | ||
| 711 | * elapsed time. Limit account_other_time to prevent rounding | ||
| 712 | * errors from causing elapsed vtime to go negative. | ||
| 713 | */ | ||
| 697 | delta = jiffies_to_cputime(now - tsk->vtime_snap); | 714 | delta = jiffies_to_cputime(now - tsk->vtime_snap); |
| 698 | other = account_other_time(delta); | 715 | other = account_other_time(delta); |
| 699 | WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE); | 716 | WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE); |
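Note: the tail of get_vtime_delta() (not shown in this hunk) presumably
subtracts the accounted other time from the elapsed delta, so the cap at delta
is exactly what keeps that subtraction from underflowing. A hypothetical helper
to make the invariant explicit:

	static cputime_t vtime_remainder(cputime_t delta, cputime_t other)
	{
		/* account_other_time(delta) guarantees other <= delta. */
		WARN_ON_ONCE(other > delta);
		return delta - other;
	}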
